Columns:
  repo_name           string (lengths 6 to 130)
  hexsha              sequence
  file_path            sequence
  code                 sequence
  apis                 sequence
  possible_versions    list
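A minimal sketch of loading and inspecting a dataset with this schema via the Hugging Face datasets library. The identifier "org/python-api-dataset" is a placeholder assumption, not the actual location of this dataset.

from datasets import load_dataset

# "org/python-api-dataset" is a placeholder identifier (assumption),
# not the real name of this dataset.
ds = load_dataset("org/python-api-dataset", split="train")

row = ds[0]
print(row["repo_name"])          # e.g. "thewtex/scikit-image"
print(len(row["file_path"]))     # number of files in this row
print(row["possible_versions"])  # candidate library versions for the APIs used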
thewtex/scikit-image
[ "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97" ]
[ "skimage/segmentation/tests/test_random_walker.py", "skimage/restoration/deconvolution.py", "skimage/feature/peak.py", "skimage/feature/tests/test_register_translation.py", "skimage/color/tests/test_delta_e.py", "skimage/segmentation/random_walker_segmentation.py", "skimage/restoration/tests/test_inpaint.py", "doc/examples/filters/plot_restoration.py", "skimage/segmentation/tests/test_boundaries.py", "skimage/util/tests/test_invert.py", "skimage/_shared/tests/test_utils.py" ]
[ "import numpy as np\nfrom skimage.segmentation import random_walker\nfrom skimage.transform import resize\nfrom skimage._shared._warnings import expected_warnings\nfrom skimage._shared import testing\n\n\n# older versions of scipy raise a warning with new NumPy because they use\n# numpy.rank() instead of arr.ndim or numpy.linalg.matrix_rank.\nSCIPY_EXPECTED = 'numpy.linalg.matrix_rank|\\A\\Z'\nPYAMG_EXPECTED_WARNING = 'pyamg|\\A\\Z'\nPYAMG_SCIPY_EXPECTED = SCIPY_EXPECTED + '|' + PYAMG_EXPECTED_WARNING\n\n\ndef make_2d_syntheticdata(lx, ly=None):\n if ly is None:\n ly = lx\n np.random.seed(1234)\n data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)\n small_l = int(lx // 5)\n data[lx // 2 - small_l:lx // 2 + small_l,\n ly // 2 - small_l:ly // 2 + small_l] = 1\n data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,\n ly // 2 - small_l + 1:ly // 2 + small_l - 1] = (\n 0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2))\n data[lx // 2 - small_l, ly // 2 - small_l // 8:ly // 2 + small_l // 8] = 0\n seeds = np.zeros_like(data)\n seeds[lx // 5, ly // 5] = 1\n seeds[lx // 2 + small_l // 4, ly // 2 - small_l // 4] = 2\n return data, seeds\n\n\ndef make_3d_syntheticdata(lx, ly=None, lz=None):\n if ly is None:\n ly = lx\n if lz is None:\n lz = lx\n np.random.seed(1234)\n data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)\n small_l = int(lx // 5)\n data[lx // 2 - small_l:lx // 2 + small_l,\n ly // 2 - small_l:ly // 2 + small_l,\n lz // 2 - small_l:lz // 2 + small_l] = 1\n data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,\n ly // 2 - small_l + 1:ly // 2 + small_l - 1,\n lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0\n # make a hole\n hole_size = np.max([1, small_l // 8])\n data[lx // 2 - small_l,\n ly // 2 - hole_size:ly // 2 + hole_size,\n lz // 2 - hole_size:lz // 2 + hole_size] = 0\n seeds = np.zeros_like(data)\n seeds[lx // 5, ly // 5, lz // 5] = 1\n seeds[lx // 2 + small_l // 4,\n ly // 2 - small_l // 4,\n lz // 2 - small_l // 4] = 2\n return data, seeds\n\n\ndef test_2d_bf():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n labels_bf = random_walker(data, labels, beta=90, mode='bf')\n assert (labels_bf[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n full_prob_bf = random_walker(data, labels, beta=90, mode='bf',\n return_full_prob=True)\n assert (full_prob_bf[1, 25:45, 40:60] >=\n full_prob_bf[0, 25:45, 40:60]).all()\n assert data.shape == labels.shape\n # Now test with more than two labels\n labels[55, 80] = 3\n full_prob_bf = random_walker(data, labels, beta=90, mode='bf',\n return_full_prob=True)\n assert (full_prob_bf[1, 25:45, 40:60] >=\n full_prob_bf[0, 25:45, 40:60]).all()\n assert len(full_prob_bf) == 3\n assert data.shape == labels.shape\n\n\ndef test_2d_cg():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels_cg = random_walker(data, labels, beta=90, mode='cg')\n assert (labels_cg[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n full_prob = random_walker(data, labels, beta=90, mode='cg',\n return_full_prob=True)\n assert (full_prob[1, 25:45, 40:60] >=\n full_prob[0, 25:45, 40:60]).all()\n assert data.shape == labels.shape\n return data, labels_cg\n\n\ndef test_2d_cg_mg():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n expected = 'scipy.sparse.sparsetools|%s' % PYAMG_SCIPY_EXPECTED\n with expected_warnings([expected]):\n labels_cg_mg = 
random_walker(data, labels, beta=90, mode='cg_mg')\n assert (labels_cg_mg[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n with expected_warnings([expected]):\n full_prob = random_walker(data, labels, beta=90, mode='cg_mg',\n return_full_prob=True)\n assert (full_prob[1, 25:45, 40:60] >=\n full_prob[0, 25:45, 40:60]).all()\n assert data.shape == labels.shape\n return data, labels_cg_mg\n\n\ndef test_types():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n data = 255 * (data - data.min()) // (data.max() - data.min())\n data = data.astype(np.uint8)\n with expected_warnings([PYAMG_SCIPY_EXPECTED]):\n labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')\n assert (labels_cg_mg[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n return data, labels_cg_mg\n\n\ndef test_reorder_labels():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n labels[labels == 2] = 4\n labels_bf = random_walker(data, labels, beta=90, mode='bf')\n assert (labels_bf[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n return data, labels_bf\n\n\ndef test_2d_inactive():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n labels[10:20, 10:20] = -1\n labels[46:50, 33:38] = -2\n labels = random_walker(data, labels, beta=90)\n assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n return data, labels\n\n\ndef test_3d():\n n = 30\n lx, ly, lz = n, n, n\n data, labels = make_3d_syntheticdata(lx, ly, lz)\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels = random_walker(data, labels, mode='cg')\n assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()\n assert data.shape == labels.shape\n return data, labels\n\n\ndef test_3d_inactive():\n n = 30\n lx, ly, lz = n, n, n\n data, labels = make_3d_syntheticdata(lx, ly, lz)\n old_labels = np.copy(labels)\n labels[5:25, 26:29, 26:29] = -1\n after_labels = np.copy(labels)\n with expected_warnings(['\"cg\" mode|CObject type' + '|' + SCIPY_EXPECTED]):\n labels = random_walker(data, labels, mode='cg')\n assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()\n assert data.shape == labels.shape\n return data, labels, old_labels, after_labels\n\n\ndef test_multispectral_2d():\n lx, ly = 70, 100\n data, labels = make_2d_syntheticdata(lx, ly)\n data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n multi_labels = random_walker(data, labels, mode='cg',\n multichannel=True)\n assert data[..., 0].shape == labels.shape\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n single_labels = random_walker(data[..., 0], labels, mode='cg')\n assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()\n assert data[..., 0].shape == labels.shape\n return data, multi_labels, single_labels, labels\n\n\ndef test_multispectral_3d():\n n = 30\n lx, ly, lz = n, n, n\n data, labels = make_3d_syntheticdata(lx, ly, lz)\n data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n multi_labels = random_walker(data, labels, mode='cg',\n multichannel=True)\n assert data[..., 0].shape == labels.shape\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n single_labels = random_walker(data[..., 0], labels, mode='cg')\n assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()\n assert 
(single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()\n assert data[..., 0].shape == labels.shape\n return data, multi_labels, single_labels, labels\n\n\ndef test_spacing_0():\n n = 30\n lx, ly, lz = n, n, n\n data, _ = make_3d_syntheticdata(lx, ly, lz)\n\n # Rescale `data` along Z axis\n data_aniso = np.zeros((n, n, n // 2))\n for i, yz in enumerate(data):\n data_aniso[i, :, :] = resize(yz, (n, n // 2),\n mode='constant',\n anti_aliasing=False)\n\n # Generate new labels\n small_l = int(lx // 5)\n labels_aniso = np.zeros_like(data_aniso)\n labels_aniso[lx // 5, ly // 5, lz // 5] = 1\n labels_aniso[lx // 2 + small_l // 4,\n ly // 2 - small_l // 4,\n lz // 4 - small_l // 8] = 2\n\n # Test with `spacing` kwarg\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',\n spacing=(1., 1., 0.5))\n\n assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()\n\n\ndef test_spacing_1():\n n = 30\n lx, ly, lz = n, n, n\n data, _ = make_3d_syntheticdata(lx, ly, lz)\n\n # Rescale `data` along Y axis\n # `resize` is not yet 3D capable, so this must be done by looping in 2D.\n data_aniso = np.zeros((n, n * 2, n))\n for i, yz in enumerate(data):\n data_aniso[i, :, :] = resize(yz, (n * 2, n),\n mode='constant',\n anti_aliasing=False)\n\n # Generate new labels\n small_l = int(lx // 5)\n labels_aniso = np.zeros_like(data_aniso)\n labels_aniso[lx // 5, ly // 5, lz // 5] = 1\n labels_aniso[lx // 2 + small_l // 4,\n ly - small_l // 2,\n lz // 2 - small_l // 4] = 2\n\n # Test with `spacing` kwarg\n # First, anisotropic along Y\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',\n spacing=(1., 2., 1.))\n assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()\n\n # Rescale `data` along X axis\n # `resize` is not yet 3D capable, so this must be done by looping in 2D.\n data_aniso = np.zeros((n, n * 2, n))\n for i in range(data.shape[1]):\n data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),\n mode='constant',\n anti_aliasing=False)\n\n # Generate new labels\n small_l = int(lx // 5)\n labels_aniso2 = np.zeros_like(data_aniso)\n labels_aniso2[lx // 5, ly // 5, lz // 5] = 1\n labels_aniso2[lx - small_l // 2,\n ly // 2 + small_l // 4,\n lz // 2 - small_l // 4] = 2\n\n # Anisotropic along X\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels_aniso2 = random_walker(data_aniso,\n labels_aniso2,\n mode='cg', spacing=(2., 1., 1.))\n assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()\n\n\ndef test_trivial_cases():\n # When all voxels are labeled\n img = np.ones((10, 10))\n labels = np.ones((10, 10))\n\n with expected_warnings([\"Returning provided labels\"]):\n pass_through = random_walker(img, labels)\n np.testing.assert_array_equal(pass_through, labels)\n\n # When all voxels are labeled AND return_full_prob is True\n labels[:, :5] = 3\n expected = np.concatenate(((labels == 1)[..., np.newaxis],\n (labels == 3)[..., np.newaxis]), axis=2)\n with expected_warnings([\"Returning provided labels\"]):\n test = random_walker(img, labels, return_full_prob=True)\n np.testing.assert_array_equal(test, expected)\n\n\ndef test_length2_spacing():\n # If this passes without raising an exception (warnings OK), the new\n # spacing code is working properly.\n np.random.seed(42)\n img = np.ones((10, 10)) + 0.2 * np.random.normal(size=(10, 10))\n labels = np.zeros((10, 10), dtype=np.uint8)\n labels[2, 4] = 1\n labels[6, 8] = 4\n random_walker(img, 
labels, spacing=(1., 2.))\n\n\ndef test_bad_inputs():\n # Too few dimensions\n img = np.ones(10)\n labels = np.arange(10)\n with testing.raises(ValueError):\n random_walker(img, labels)\n with testing.raises(ValueError):\n random_walker(img, labels, multichannel=True)\n\n # Too many dimensions\n np.random.seed(42)\n img = np.random.normal(size=(3, 3, 3, 3, 3))\n labels = np.arange(3 ** 5).reshape(img.shape)\n with testing.raises(ValueError):\n random_walker(img, labels)\n with testing.raises(ValueError):\n random_walker(img, labels, multichannel=True)\n\n # Spacing incorrect length\n img = np.random.normal(size=(10, 10))\n labels = np.zeros((10, 10))\n labels[2, 4] = 2\n labels[6, 8] = 5\n with testing.raises(ValueError):\n random_walker(img, labels, spacing=(1,))\n\n # Invalid mode\n img = np.random.normal(size=(10, 10))\n labels = np.zeros((10, 10))\n with testing.raises(ValueError):\n random_walker(img, labels, mode='bad')\n\n\ndef test_isolated_seeds():\n np.random.seed(0)\n a = np.random.random((7, 7))\n mask = - np.ones(a.shape)\n # This pixel is an isolated seed\n mask[1, 1] = 1\n # Unlabeled pixels\n mask[3:, 3:] = 0\n # Seeds connected to unlabeled pixels\n mask[4, 4] = 2\n mask[6, 6] = 1\n\n # Test that no error is raised, and that labels of isolated seeds are OK\n res = random_walker(a, mask)\n assert res[1, 1] == 1\n res = random_walker(a, mask, return_full_prob=True)\n assert res[0, 1, 1] == 1\n assert res[1, 1, 1] == 0\n", "\"\"\"Implementations restoration functions\"\"\"\n\n\nimport numpy as np\nimport numpy.random as npr\nfrom scipy.signal import fftconvolve, convolve\n\nfrom . import uft\n\n__keywords__ = \"restoration, image, deconvolution\"\n\n\ndef wiener(image, psf, balance, reg=None, is_real=True, clip=True):\n \"\"\"Wiener-Hunt deconvolution\n\n Return the deconvolution with a Wiener-Hunt approach (i.e. with\n Fourier diagonalisation).\n\n Parameters\n ----------\n image : (M, N) ndarray\n Input degraded image\n psf : ndarray\n Point Spread Function. This is assumed to be the impulse\n response (input image space) if the data-type is real, or the\n transfer function (Fourier space) if the data-type is\n complex. There is no constraints on the shape of the impulse\n response. The transfer function must be of shape `(M, N)` if\n `is_real is True`, `(M, N // 2 + 1)` otherwise (see\n `np.fft.rfftn`).\n balance : float\n The regularisation parameter value that tunes the balance\n between the data adequacy that improve frequency restoration\n and the prior adequacy that reduce frequency restoration (to\n avoid noise artifacts).\n reg : ndarray, optional\n The regularisation operator. The Laplacian by default. It can\n be an impulse response or a transfer function, as for the\n psf. Shape constraint is the same as for the `psf` parameter.\n is_real : boolean, optional\n True by default. Specify if ``psf`` and ``reg`` are provided\n with hermitian hypothesis, that is only half of the frequency\n plane is provided (due to the redundancy of Fourier transform\n of real signal). It's apply only if ``psf`` and/or ``reg`` are\n provided as transfer function. For the hermitian property see\n ``uft`` module or ``np.fft.rfftn``.\n clip : boolean, optional\n True by default. 
If True, pixel values of the result above 1 or\n under -1 are thresholded for skimage pipeline compatibility.\n\n Returns\n -------\n im_deconv : (M, N) ndarray\n The deconvolved image.\n\n Examples\n --------\n >>> from skimage import color, data, restoration\n >>> img = color.rgb2gray(data.astronaut())\n >>> from scipy.signal import convolve2d\n >>> psf = np.ones((5, 5)) / 25\n >>> img = convolve2d(img, psf, 'same')\n >>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)\n >>> deconvolved_img = restoration.wiener(img, psf, 1100)\n\n Notes\n -----\n This function applies the Wiener filter to a noisy and degraded\n image by an impulse response (or PSF). If the data model is\n\n .. math:: y = Hx + n\n\n where :math:`n` is noise, :math:`H` the PSF and :math:`x` the\n unknown original image, the Wiener filter is\n\n .. math::\n \\hat x = F^\\dagger (|\\Lambda_H|^2 + \\lambda |\\Lambda_D|^2)\n \\Lambda_H^\\dagger F y\n\n where :math:`F` and :math:`F^\\dagger` are the Fourier and inverse\n Fourier transfroms respectively, :math:`\\Lambda_H` the transfer\n function (or the Fourier transfrom of the PSF, see [Hunt] below)\n and :math:`\\Lambda_D` the filter to penalize the restored image\n frequencies (Laplacian by default, that is penalization of high\n frequency). The parameter :math:`\\lambda` tunes the balance\n between the data (that tends to increase high frequency, even\n those coming from noise), and the regularization.\n\n These methods are then specific to a prior model. Consequently,\n the application or the true image nature must corresponds to the\n prior model. By default, the prior model (Laplacian) introduce\n image smoothness or pixel correlation. It can also be interpreted\n as high-frequency penalization to compensate the instability of\n the solution with respect to the data (sometimes called noise\n amplification or \"explosive\" solution).\n\n Finally, the use of Fourier space implies a circulant property of\n :math:`H`, see [Hunt].\n\n References\n ----------\n .. [1] François Orieux, Jean-François Giovannelli, and Thomas\n Rodet, \"Bayesian estimation of regularization and point\n spread function parameters for Wiener-Hunt deconvolution\",\n J. Opt. Soc. Am. A 27, 1593-1607 (2010)\n\n http://www.opticsinfobase.org/josaa/abstract.cfm?URI=josaa-27-7-1593\n\n http://research.orieux.fr/files/papers/OGR-JOSA10.pdf\n\n .. [2] B. R. Hunt \"A matrix theory proof of the discrete\n convolution theorem\", IEEE Trans. on Audio and\n Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971\n \"\"\"\n if reg is None:\n reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)\n if not np.iscomplexobj(reg):\n reg = uft.ir2tf(reg, image.shape, is_real=is_real)\n\n if psf.shape != reg.shape:\n trans_func = uft.ir2tf(psf, image.shape, is_real=is_real)\n else:\n trans_func = psf\n\n wiener_filter = np.conj(trans_func) / (np.abs(trans_func) ** 2 +\n balance * np.abs(reg) ** 2)\n if is_real:\n deconv = uft.uirfft2(wiener_filter * uft.urfft2(image),\n shape=image.shape)\n else:\n deconv = uft.uifft2(wiener_filter * uft.ufft2(image))\n\n if clip:\n deconv[deconv > 1] = 1\n deconv[deconv < -1] = -1\n\n return deconv\n\n\ndef unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True,\n clip=True):\n \"\"\"Unsupervised Wiener-Hunt deconvolution.\n\n Return the deconvolution with a Wiener-Hunt approach, where the\n hyperparameters are automatically estimated. The algorithm is a\n stochastic iterative process (Gibbs sampler) described in the\n reference below. 
See also ``wiener`` function.\n\n Parameters\n ----------\n image : (M, N) ndarray\n The input degraded image.\n psf : ndarray\n The impulse response (input image's space) or the transfer\n function (Fourier space). Both are accepted. The transfer\n function is automatically recognized as being complex\n (``np.iscomplexobj(psf)``).\n reg : ndarray, optional\n The regularisation operator. The Laplacian by default. It can\n be an impulse response or a transfer function, as for the psf.\n user_params : dict\n Dictionary of parameters for the Gibbs sampler. See below.\n clip : boolean, optional\n True by default. If true, pixel values of the result above 1 or\n under -1 are thresholded for skimage pipeline compatibility.\n\n Returns\n -------\n x_postmean : (M, N) ndarray\n The deconvolved image (the posterior mean).\n chains : dict\n The keys ``noise`` and ``prior`` contain the chain list of\n noise and prior precision respectively.\n\n Other parameters\n ----------------\n The keys of ``user_params`` are:\n\n threshold : float\n The stopping criterion: the norm of the difference between to\n successive approximated solution (empirical mean of object\n samples, see Notes section). 1e-4 by default.\n burnin : int\n The number of sample to ignore to start computation of the\n mean. 15 by default.\n min_iter : int\n The minimum number of iterations. 30 by default.\n max_iter : int\n The maximum number of iterations if ``threshold`` is not\n satisfied. 200 by default.\n callback : callable (None by default)\n A user provided callable to which is passed, if the function\n exists, the current image sample for whatever purpose. The user\n can store the sample, or compute other moments than the\n mean. It has no influence on the algorithm execution and is\n only for inspection.\n\n Examples\n --------\n >>> from skimage import color, data, restoration\n >>> img = color.rgb2gray(data.astronaut())\n >>> from scipy.signal import convolve2d\n >>> psf = np.ones((5, 5)) / 25\n >>> img = convolve2d(img, psf, 'same')\n >>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)\n >>> deconvolved_img = restoration.unsupervised_wiener(img, psf)\n\n Notes\n -----\n The estimated image is design as the posterior mean of a\n probability law (from a Bayesian analysis). The mean is defined as\n a sum over all the possible images weighted by their respective\n probability. Given the size of the problem, the exact sum is not\n tractable. This algorithm use of MCMC to draw image under the\n posterior law. The practical idea is to only draw highly probable\n images since they have the biggest contribution to the mean. At the\n opposite, the less probable images are drawn less often since\n their contribution is low. Finally the empirical mean of these\n samples give us an estimation of the mean, and an exact\n computation with an infinite sample set.\n\n References\n ----------\n .. [1] François Orieux, Jean-François Giovannelli, and Thomas\n Rodet, \"Bayesian estimation of regularization and point\n spread function parameters for Wiener-Hunt deconvolution\",\n J. Opt. Soc. Am. 
A 27, 1593-1607 (2010)\n\n http://www.opticsinfobase.org/josaa/abstract.cfm?URI=josaa-27-7-1593\n\n http://research.orieux.fr/files/papers/OGR-JOSA10.pdf\n \"\"\"\n params = {'threshold': 1e-4, 'max_iter': 200,\n 'min_iter': 30, 'burnin': 15, 'callback': None}\n params.update(user_params or {})\n\n if reg is None:\n reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)\n if not np.iscomplexobj(reg):\n reg = uft.ir2tf(reg, image.shape, is_real=is_real)\n\n if psf.shape != reg.shape:\n trans_fct = uft.ir2tf(psf, image.shape, is_real=is_real)\n else:\n trans_fct = psf\n\n # The mean of the object\n x_postmean = np.zeros(trans_fct.shape)\n # The previous computed mean in the iterative loop\n prev_x_postmean = np.zeros(trans_fct.shape)\n\n # Difference between two successive mean\n delta = np.NAN\n\n # Initial state of the chain\n gn_chain, gx_chain = [1], [1]\n\n # The correlation of the object in Fourier space (if size is big,\n # this can reduce computation time in the loop)\n areg2 = np.abs(reg) ** 2\n atf2 = np.abs(trans_fct) ** 2\n\n # The Fourier transfrom may change the image.size attribut, so we\n # store it.\n if is_real:\n data_spectrum = uft.urfft2(image.astype(np.float))\n else:\n data_spectrum = uft.ufft2(image.astype(np.float))\n\n # Gibbs sampling\n for iteration in range(params['max_iter']):\n # Sample of Eq. 27 p(circX^k | gn^k-1, gx^k-1, y).\n\n # weighting (correlation in direct space)\n precision = gn_chain[-1] * atf2 + gx_chain[-1] * areg2 # Eq. 29\n excursion = np.sqrt(0.5) / np.sqrt(precision) * (\n np.random.standard_normal(data_spectrum.shape) +\n 1j * np.random.standard_normal(data_spectrum.shape))\n\n # mean Eq. 30 (RLS for fixed gn, gamma0 and gamma1 ...)\n wiener_filter = gn_chain[-1] * np.conj(trans_fct) / precision\n\n # sample of X in Fourier space\n x_sample = wiener_filter * data_spectrum + excursion\n if params['callback']:\n params['callback'](x_sample)\n\n # sample of Eq. 31 p(gn | x^k, gx^k, y)\n gn_chain.append(npr.gamma(image.size / 2,\n 2 / uft.image_quad_norm(data_spectrum -\n x_sample *\n trans_fct)))\n\n # sample of Eq. 31 p(gx | x^k, gn^k-1, y)\n gx_chain.append(npr.gamma((image.size - 1) / 2,\n 2 / uft.image_quad_norm(x_sample * reg)))\n\n # current empirical average\n if iteration > params['burnin']:\n x_postmean = prev_x_postmean + x_sample\n\n if iteration > (params['burnin'] + 1):\n current = x_postmean / (iteration - params['burnin'])\n previous = prev_x_postmean / (iteration - params['burnin'] - 1)\n\n delta = np.sum(np.abs(current - previous)) / \\\n np.sum(np.abs(x_postmean)) / (iteration - params['burnin'])\n\n prev_x_postmean = x_postmean\n\n # stop of the algorithm\n if (iteration > params['min_iter']) and (delta < params['threshold']):\n break\n\n # Empirical average \\approx POSTMEAN Eq. 44\n x_postmean = x_postmean / (iteration - params['burnin'])\n if is_real:\n x_postmean = uft.uirfft2(x_postmean, shape=image.shape)\n else:\n x_postmean = uft.uifft2(x_postmean)\n\n if clip:\n x_postmean[x_postmean > 1] = 1\n x_postmean[x_postmean < -1] = -1\n\n return (x_postmean, {'noise': gn_chain, 'prior': gx_chain})\n\n\ndef richardson_lucy(image, psf, iterations=50, clip=True):\n \"\"\"Richardson-Lucy deconvolution.\n\n Parameters\n ----------\n image : ndarray\n Input degraded image (can be N dimensional).\n psf : ndarray\n The point spread function.\n iterations : int\n Number of iterations. This parameter plays the role of\n regularisation.\n clip : boolean, optional\n True by default. 
If true, pixel value of the result above 1 or\n under -1 are thresholded for skimage pipeline compatibility.\n\n Returns\n -------\n im_deconv : ndarray\n The deconvolved image.\n\n Examples\n --------\n >>> from skimage import color, data, restoration\n >>> camera = color.rgb2gray(data.camera())\n >>> from scipy.signal import convolve2d\n >>> psf = np.ones((5, 5)) / 25\n >>> camera = convolve2d(camera, psf, 'same')\n >>> camera += 0.1 * camera.std() * np.random.standard_normal(camera.shape)\n >>> deconvolved = restoration.richardson_lucy(camera, psf, 5)\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution\n \"\"\"\n # compute the times for direct convolution and the fft method. The fft is of\n # complexity O(N log(N)) for each dimension and the direct method does\n # straight arithmetic (and is O(n*k) to add n elements k times)\n direct_time = np.prod(image.shape + psf.shape)\n fft_time = np.sum([n*np.log(n) for n in image.shape + psf.shape])\n\n # see whether the fourier transform convolution method or the direct\n # convolution method is faster (discussed in scikit-image PR #1792)\n time_ratio = 40.032 * fft_time / direct_time\n\n if time_ratio <= 1 or len(image.shape) > 2:\n convolve_method = fftconvolve\n else:\n convolve_method = convolve\n\n image = image.astype(np.float)\n psf = psf.astype(np.float)\n im_deconv = 0.5 * np.ones(image.shape)\n psf_mirror = psf[::-1, ::-1]\n\n for _ in range(iterations):\n relative_blur = image / convolve_method(im_deconv, psf, 'same')\n im_deconv *= convolve_method(relative_blur, psf_mirror, 'same')\n\n if clip:\n im_deconv[im_deconv > 1] = 1\n im_deconv[im_deconv < -1] = -1\n\n return im_deconv\n", "import numpy as np\nimport scipy.ndimage as ndi\nfrom ..segmentation import relabel_sequential\nfrom .. import measure\nfrom ..filters import rank_order\n\n\ndef _get_high_intensity_peaks(image, mask, num_peaks):\n \"\"\"\n Return the highest intensity peak coordinates.\n \"\"\"\n # get coordinates of peaks\n coord = np.nonzero(mask)\n # select num_peaks peaks\n if len(coord[0]) > num_peaks:\n intensities = image[coord]\n idx_maxsort = np.argsort(intensities)\n coord = np.transpose(coord)[idx_maxsort][-num_peaks:]\n else:\n coord = np.column_stack(coord)\n # Higest peak first\n return coord[::-1]\n\n\ndef peak_local_max(image, min_distance=1, threshold_abs=None,\n threshold_rel=None, exclude_border=True, indices=True,\n num_peaks=np.inf, footprint=None, labels=None,\n num_peaks_per_label=np.inf):\n \"\"\"Find peaks in an image as coordinate list or boolean mask.\n\n Peaks are the local maxima in a region of `2 * min_distance + 1`\n (i.e. peaks are separated by at least `min_distance`).\n\n If peaks are flat (i.e. multiple adjacent pixels have identical\n intensities), the coordinates of all such pixels are returned.\n\n If both `threshold_abs` and `threshold_rel` are provided, the maximum\n of the two is chosen as the minimum intensity threshold of peaks.\n\n Parameters\n ----------\n image : ndarray\n Input image.\n min_distance : int, optional\n Minimum number of pixels separating peaks in a region of `2 *\n min_distance + 1` (i.e. peaks are separated by at least\n `min_distance`).\n To find the maximum number of peaks, use `min_distance=1`.\n threshold_abs : float, optional\n Minimum intensity of peaks. 
By default, the absolute threshold is\n the minimum intensity of the image.\n threshold_rel : float, optional\n Minimum intensity of peaks, calculated as `max(image) * threshold_rel`.\n exclude_border : int, optional\n If nonzero, `exclude_border` excludes peaks from\n within `exclude_border`-pixels of the border of the image.\n indices : bool, optional\n If True, the output will be an array representing peak\n coordinates. If False, the output will be a boolean array shaped as\n `image.shape` with peaks present at True elements.\n num_peaks : int, optional\n Maximum number of peaks. When the number of peaks exceeds `num_peaks`,\n return `num_peaks` peaks based on highest peak intensity.\n footprint : ndarray of bools, optional\n If provided, `footprint == 1` represents the local region within which\n to search for peaks at every point in `image`. Overrides\n `min_distance` (also for `exclude_border`).\n labels : ndarray of ints, optional\n If provided, each unique region `labels == value` represents a unique\n region to search for peaks. Zero is reserved for background.\n num_peaks_per_label : int, optional\n Maximum number of peaks for each label.\n\n Returns\n -------\n output : ndarray or ndarray of bools\n\n * If `indices = True` : (row, column, ...) coordinates of peaks.\n * If `indices = False` : Boolean array shaped like `image`, with peaks\n represented by True values.\n\n Notes\n -----\n The peak local maximum function returns the coordinates of local peaks\n (maxima) in an image. A maximum filter is used for finding local maxima.\n This operation dilates the original image. After comparison of the dilated\n and original image, this function returns the coordinates or a mask of the\n peaks where the dilated image equals the original image.\n\n Examples\n --------\n >>> img1 = np.zeros((7, 7))\n >>> img1[3, 4] = 1\n >>> img1[3, 2] = 1.5\n >>> img1\n array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. 
]])\n\n >>> peak_local_max(img1, min_distance=1)\n array([[3, 4],\n [3, 2]])\n\n >>> peak_local_max(img1, min_distance=2)\n array([[3, 2]])\n\n >>> img2 = np.zeros((20, 20, 20))\n >>> img2[10, 10, 10] = 1\n >>> peak_local_max(img2, exclude_border=0)\n array([[10, 10, 10]])\n\n \"\"\"\n if type(exclude_border) == bool:\n exclude_border = min_distance if exclude_border else 0\n\n out = np.zeros_like(image, dtype=np.bool)\n\n # In the case of labels, recursively build and return an output\n # operating on each label separately\n if labels is not None:\n label_values = np.unique(labels)\n # Reorder label values to have consecutive integers (no gaps)\n if np.any(np.diff(label_values) != 1):\n mask = labels >= 1\n labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype)\n labels = labels.astype(np.int32)\n\n # New values for new ordering\n label_values = np.unique(labels)\n for label in label_values[label_values != 0]:\n maskim = (labels == label)\n out += peak_local_max(image * maskim, min_distance=min_distance,\n threshold_abs=threshold_abs,\n threshold_rel=threshold_rel,\n exclude_border=exclude_border,\n indices=False, num_peaks=num_peaks_per_label,\n footprint=footprint, labels=None)\n\n # Select highest intensities (num_peaks)\n coordinates = _get_high_intensity_peaks(image, out, num_peaks)\n\n if indices is True:\n return coordinates\n else:\n nd_indices = tuple(coordinates.T)\n out[nd_indices] = True\n return out\n\n if np.all(image == image.flat[0]):\n if indices is True:\n return np.empty((0, 2), np.int)\n else:\n return out\n\n # Non maximum filter\n if footprint is not None:\n image_max = ndi.maximum_filter(image, footprint=footprint,\n mode='constant')\n else:\n size = 2 * min_distance + 1\n image_max = ndi.maximum_filter(image, size=size, mode='constant')\n mask = image == image_max\n\n if exclude_border:\n # zero out the image borders\n for i in range(mask.ndim):\n mask = mask.swapaxes(0, i)\n remove = (footprint.shape[i] if footprint is not None\n else 2 * exclude_border)\n mask[:remove // 2] = mask[-remove // 2:] = False\n mask = mask.swapaxes(0, i)\n\n # find top peak candidates above a threshold\n thresholds = []\n if threshold_abs is None:\n threshold_abs = image.min()\n thresholds.append(threshold_abs)\n if threshold_rel is not None:\n thresholds.append(threshold_rel * image.max())\n if thresholds:\n mask &= image > max(thresholds)\n\n # Select highest intensities (num_peaks)\n coordinates = _get_high_intensity_peaks(image, mask, num_peaks)\n\n if indices is True:\n return coordinates\n else:\n nd_indices = tuple(coordinates.T)\n out[nd_indices] = True\n return out\n\n\ndef _prominent_peaks(image, min_xdistance=1, min_ydistance=1,\n threshold=None, num_peaks=np.inf):\n \"\"\"Return peaks with non-maximum suppression.\n\n Identifies most prominent features separated by certain distances.\n Non-maximum suppression with different sizes is applied separately\n in the first and second dimension of the image to identify peaks.\n\n Parameters\n ----------\n image : (M, N) ndarray\n Input image.\n min_xdistance : int\n Minimum distance separating features in the x dimension.\n min_ydistance : int\n Minimum distance separating features in the y dimension.\n threshold : float\n Minimum intensity of peaks. Default is `0.5 * max(image)`.\n num_peaks : int\n Maximum number of peaks. 
When the number of peaks exceeds `num_peaks`,\n return `num_peaks` coordinates based on peak intensity.\n\n Returns\n -------\n intensity, xcoords, ycoords : tuple of array\n Peak intensity values, x and y indices.\n \"\"\"\n\n img = image.copy()\n rows, cols = img.shape\n\n if threshold is None:\n threshold = 0.5 * np.max(img)\n\n ycoords_size = 2 * min_ydistance + 1\n xcoords_size = 2 * min_xdistance + 1\n img_max = ndi.maximum_filter1d(img, size=ycoords_size, axis=0,\n mode='constant', cval=0)\n img_max = ndi.maximum_filter1d(img_max, size=xcoords_size, axis=1,\n mode='constant', cval=0)\n mask = (img == img_max)\n img *= mask\n img_t = img > threshold\n\n label_img = measure.label(img_t)\n props = measure.regionprops(label_img, img_max)\n\n # Sort the list of peaks by intensity, not left-right, so larger peaks\n # in Hough space cannot be arbitrarily suppressed by smaller neighbors\n props = sorted(props, key=lambda x: x.max_intensity)[::-1]\n coords = np.array([np.round(p.centroid) for p in props], dtype=int)\n\n img_peaks = []\n ycoords_peaks = []\n xcoords_peaks = []\n\n # relative coordinate grid for local neighbourhood suppression\n ycoords_ext, xcoords_ext = np.mgrid[-min_ydistance:min_ydistance + 1,\n -min_xdistance:min_xdistance + 1]\n\n for ycoords_idx, xcoords_idx in coords:\n accum = img_max[ycoords_idx, xcoords_idx]\n if accum > threshold:\n # absolute coordinate grid for local neighbourhood suppression\n ycoords_nh = ycoords_idx + ycoords_ext\n xcoords_nh = xcoords_idx + xcoords_ext\n\n # no reflection for distance neighbourhood\n ycoords_in = np.logical_and(ycoords_nh > 0, ycoords_nh < rows)\n ycoords_nh = ycoords_nh[ycoords_in]\n xcoords_nh = xcoords_nh[ycoords_in]\n\n # reflect xcoords and assume xcoords are continuous,\n # e.g. 
for angles:\n # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)\n xcoords_low = xcoords_nh < 0\n ycoords_nh[xcoords_low] = rows - ycoords_nh[xcoords_low]\n xcoords_nh[xcoords_low] += cols\n xcoords_high = xcoords_nh >= cols\n ycoords_nh[xcoords_high] = rows - ycoords_nh[xcoords_high]\n xcoords_nh[xcoords_high] -= cols\n\n # suppress neighbourhood\n img_max[ycoords_nh, xcoords_nh] = 0\n\n # add current feature to peaks\n img_peaks.append(accum)\n ycoords_peaks.append(ycoords_idx)\n xcoords_peaks.append(xcoords_idx)\n\n img_peaks = np.array(img_peaks)\n ycoords_peaks = np.array(ycoords_peaks)\n xcoords_peaks = np.array(xcoords_peaks)\n\n if num_peaks < len(img_peaks):\n idx_maxsort = np.argsort(img_peaks)[::-1][:num_peaks]\n img_peaks = img_peaks[idx_maxsort]\n ycoords_peaks = ycoords_peaks[idx_maxsort]\n xcoords_peaks = xcoords_peaks[idx_maxsort]\n\n return img_peaks, xcoords_peaks, ycoords_peaks\n", "import numpy as np\nfrom skimage._shared.testing import assert_allclose\n\nfrom skimage.feature.register_translation import (register_translation,\n _upsampled_dft)\nfrom skimage.data import camera, binary_blobs\nfrom scipy.ndimage import fourier_shift\nfrom skimage import img_as_float\nfrom skimage._shared import testing\n\n\ndef test_correlation():\n reference_image = np.fft.fftn(camera())\n shift = (-7, 12)\n shifted_image = fourier_shift(reference_image, shift)\n\n # pixel precision\n result, error, diffphase = register_translation(reference_image,\n shifted_image,\n space=\"fourier\")\n assert_allclose(result[:2], -np.array(shift))\n\n\ndef test_subpixel_precision():\n reference_image = np.fft.fftn(camera())\n subpixel_shift = (-2.4, 1.32)\n shifted_image = fourier_shift(reference_image, subpixel_shift)\n\n # subpixel precision\n result, error, diffphase = register_translation(reference_image,\n shifted_image, 100,\n space=\"fourier\")\n assert_allclose(result[:2], -np.array(subpixel_shift), atol=0.05)\n\n\ndef test_real_input():\n reference_image = camera()\n subpixel_shift = (-2.4, 1.32)\n shifted_image = fourier_shift(np.fft.fftn(reference_image), subpixel_shift)\n shifted_image = np.fft.ifftn(shifted_image)\n\n # subpixel precision\n result, error, diffphase = register_translation(reference_image,\n shifted_image, 100)\n assert_allclose(result[:2], -np.array(subpixel_shift), atol=0.05)\n\n\ndef test_size_one_dimension_input():\n # take a strip of the input image\n reference_image = np.fft.fftn(camera()[:, 15]).reshape((-1, 1))\n subpixel_shift = (-2.4, 4)\n shifted_image = fourier_shift(reference_image, subpixel_shift)\n\n # subpixel precision\n result, error, diffphase = register_translation(reference_image,\n shifted_image, 100,\n space=\"fourier\")\n assert_allclose(result[:2], -np.array((-2.4, 0)), atol=0.05)\n\n\ndef test_3d_input():\n phantom = img_as_float(binary_blobs(length=32, n_dim=3))\n reference_image = np.fft.fftn(phantom)\n shift = (-2., 1., 5.)\n shifted_image = fourier_shift(reference_image, shift)\n\n result, error, diffphase = register_translation(reference_image,\n shifted_image,\n space=\"fourier\")\n assert_allclose(result, -np.array(shift), atol=0.05)\n # subpixel precision not available for 3-D data\n subpixel_shift = (-2.3, 1., 5.)\n shifted_image = fourier_shift(reference_image, subpixel_shift)\n result, error, diffphase = register_translation(reference_image,\n shifted_image,\n space=\"fourier\")\n assert_allclose(result, -np.array(shift), atol=0.5)\n with testing.raises(NotImplementedError):\n register_translation(\n reference_image,\n shifted_image, 
upsample_factor=100,\n space=\"fourier\")\n\n\ndef test_unknown_space_input():\n image = np.ones((5, 5))\n with testing.raises(ValueError):\n register_translation(\n image, image,\n space=\"frank\")\n\n\ndef test_wrong_input():\n # Dimensionality mismatch\n image = np.ones((5, 5, 1))\n template = np.ones((5, 5))\n with testing.raises(ValueError):\n register_translation(template, image)\n\n # Greater than 2 dimensions does not support subpixel precision\n # (TODO: should support 3D at some point.)\n image = np.ones((5, 5, 5))\n template = np.ones((5, 5, 5))\n with testing.raises(NotImplementedError):\n register_translation(template, image, 2)\n\n # Size mismatch\n image = np.ones((5, 5))\n template = np.ones((4, 4))\n with testing.raises(ValueError):\n register_translation(template, image)\n\n\ndef test_mismatch_upsampled_region_size():\n with testing.raises(ValueError):\n _upsampled_dft(\n np.ones((4, 4)),\n upsampled_region_size=[3, 2, 1, 4])\n\n\ndef test_mismatch_offsets_size():\n with testing.raises(ValueError):\n _upsampled_dft(np.ones((4, 4)), 3,\n axis_offsets=[3, 2, 1, 4])\n", "\"\"\"Test for correctness of color distance functions\"\"\"\nfrom os.path import abspath, dirname, join as pjoin\n\nimport numpy as np\nfrom skimage._shared.testing import assert_allclose\n\nfrom skimage.color import (deltaE_cie76,\n deltaE_ciede94,\n deltaE_ciede2000,\n deltaE_cmc)\n\n\ndef test_ciede2000_dE():\n data = load_ciede2000_data()\n N = len(data)\n lab1 = np.zeros((N, 3))\n lab1[:, 0] = data['L1']\n lab1[:, 1] = data['a1']\n lab1[:, 2] = data['b1']\n\n lab2 = np.zeros((N, 3))\n lab2[:, 0] = data['L2']\n lab2[:, 1] = data['a2']\n lab2[:, 2] = data['b2']\n\n dE2 = deltaE_ciede2000(lab1, lab2)\n\n assert_allclose(dE2, data['dE'], rtol=1.e-4)\n\n\ndef load_ciede2000_data():\n dtype = [('pair', int),\n ('1', int),\n ('L1', float),\n ('a1', float),\n ('b1', float),\n ('a1_prime', float),\n ('C1_prime', float),\n ('h1_prime', float),\n ('hbar_prime', float),\n ('G', float),\n ('T', float),\n ('SL', float),\n ('SC', float),\n ('SH', float),\n ('RT', float),\n ('dE', float),\n ('2', int),\n ('L2', float),\n ('a2', float),\n ('b2', float),\n ('a2_prime', float),\n ('C2_prime', float),\n ('h2_prime', float),\n ]\n\n # note: ciede_test_data.txt contains several intermediate quantities\n path = pjoin(dirname(abspath(__file__)), 'ciede2000_test_data.txt')\n return np.loadtxt(path, dtype=dtype)\n\n\ndef test_cie76():\n data = load_ciede2000_data()\n N = len(data)\n lab1 = np.zeros((N, 3))\n lab1[:, 0] = data['L1']\n lab1[:, 1] = data['a1']\n lab1[:, 2] = data['b1']\n\n lab2 = np.zeros((N, 3))\n lab2[:, 0] = data['L2']\n lab2[:, 1] = data['a2']\n lab2[:, 2] = data['b2']\n\n dE2 = deltaE_cie76(lab1, lab2)\n oracle = np.array([\n 4.00106328, 6.31415011, 9.1776999, 2.06270077, 2.36957073,\n 2.91529271, 2.23606798, 2.23606798, 4.98000036, 4.9800004,\n 4.98000044, 4.98000049, 4.98000036, 4.9800004, 4.98000044,\n 3.53553391, 36.86800781, 31.91002977, 30.25309901, 27.40894015,\n 0.89242934, 0.7972, 0.8583065, 0.82982507, 3.1819238,\n 2.21334297, 1.53890382, 4.60630929, 6.58467989, 3.88641412,\n 1.50514845, 2.3237848, 0.94413208, 1.31910843\n ])\n assert_allclose(dE2, oracle, rtol=1.e-8)\n\n\ndef test_ciede94():\n data = load_ciede2000_data()\n N = len(data)\n lab1 = np.zeros((N, 3))\n lab1[:, 0] = data['L1']\n lab1[:, 1] = data['a1']\n lab1[:, 2] = data['b1']\n\n lab2 = np.zeros((N, 3))\n lab2[:, 0] = data['L2']\n lab2[:, 1] = data['a2']\n lab2[:, 2] = data['b2']\n\n dE2 = deltaE_ciede94(lab1, lab2)\n oracle = 
np.array([\n 1.39503887, 1.93410055, 2.45433566, 0.68449187, 0.6695627,\n 0.69194527, 2.23606798, 2.03163832, 4.80069441, 4.80069445,\n 4.80069449, 4.80069453, 4.80069441, 4.80069445, 4.80069449,\n 3.40774352, 34.6891632, 29.44137328, 27.91408781, 24.93766082,\n 0.82213163, 0.71658427, 0.8048753, 0.75284394, 1.39099471,\n 1.24808929, 1.29795787, 1.82045088, 2.55613309, 1.42491303,\n 1.41945261, 2.3225685, 0.93853308, 1.30654464\n ])\n assert_allclose(dE2, oracle, rtol=1.e-8)\n\n\ndef test_cmc():\n data = load_ciede2000_data()\n N = len(data)\n lab1 = np.zeros((N, 3))\n lab1[:, 0] = data['L1']\n lab1[:, 1] = data['a1']\n lab1[:, 2] = data['b1']\n\n lab2 = np.zeros((N, 3))\n lab2[:, 0] = data['L2']\n lab2[:, 1] = data['a2']\n lab2[:, 2] = data['b2']\n\n dE2 = deltaE_cmc(lab1, lab2)\n oracle = np.array([\n 1.73873611, 2.49660844, 3.30494501, 0.85735576, 0.88332927,\n 0.97822692, 3.50480874, 2.87930032, 6.5783807, 6.57838075,\n 6.5783808, 6.57838086, 6.67492321, 6.67492326, 6.67492331,\n 4.66852997, 42.10875485, 39.45889064, 38.36005919, 33.93663807,\n 1.14400168, 1.00600419, 1.11302547, 1.05335328, 1.42822951,\n 1.2548143, 1.76838061, 2.02583367, 3.08695508, 1.74893533,\n 1.90095165, 1.70258148, 1.80317207, 2.44934417\n ])\n\n assert_allclose(dE2, oracle, rtol=1.e-8)\n\n\ndef test_single_color_cie76():\n lab1 = (0.5, 0.5, 0.5)\n lab2 = (0.4, 0.4, 0.4)\n deltaE_cie76(lab1, lab2)\n\n\ndef test_single_color_ciede94():\n lab1 = (0.5, 0.5, 0.5)\n lab2 = (0.4, 0.4, 0.4)\n deltaE_ciede94(lab1, lab2)\n\n\ndef test_single_color_ciede2000():\n lab1 = (0.5, 0.5, 0.5)\n lab2 = (0.4, 0.4, 0.4)\n deltaE_ciede2000(lab1, lab2)\n\n\ndef test_single_color_cmc():\n lab1 = (0.5, 0.5, 0.5)\n lab2 = (0.4, 0.4, 0.4)\n deltaE_cmc(lab1, lab2)\n", "\"\"\"\nRandom walker segmentation algorithm\n\nfrom *Random walks for image segmentation*, Leo Grady, IEEE Trans\nPattern Anal Mach Intell. 
2006 Nov;28(11):1768-83.\n\nInstalling pyamg and using the 'cg_mg' mode of random_walker improves\nsignificantly the performance.\n\"\"\"\n\nimport numpy as np\nfrom scipy import sparse, ndimage as ndi\n\nfrom .._shared.utils import warn\n\n# executive summary for next code block: try to import umfpack from\n# scipy, but make sure not to raise a fuss if it fails since it's only\n# needed to speed up a few cases.\n# See discussions at:\n# https://groups.google.com/d/msg/scikit-image/FrM5IGP6wh4/1hp-FtVZmfcJ\n# http://stackoverflow.com/questions/13977970/ignore-exceptions-printed-to-stderr-in-del/13977992?noredirect=1#comment28386412_13977992\ntry:\n from scipy.sparse.linalg.dsolve import umfpack\n old_del = umfpack.UmfpackContext.__del__\n\n def new_del(self):\n try:\n old_del(self)\n except AttributeError:\n pass\n umfpack.UmfpackContext.__del__ = new_del\n UmfpackContext = umfpack.UmfpackContext()\nexcept:\n UmfpackContext = None\n\ntry:\n from pyamg import ruge_stuben_solver\n amg_loaded = True\nexcept ImportError:\n amg_loaded = False\n\nfrom ..util import img_as_float\nfrom ..filters import rank_order\n\nfrom scipy.sparse.linalg import cg\nimport scipy\nfrom distutils.version import LooseVersion as Version\nimport functools\n\nif Version(scipy.__version__) >= Version('1.1'):\n cg = functools.partial(cg, atol=0)\n\n#-----------Laplacian--------------------\n\n\ndef _make_graph_edges_3d(n_x, n_y, n_z):\n \"\"\"Returns a list of edges for a 3D image.\n\n Parameters\n ----------\n n_x: integer\n The size of the grid in the x direction.\n n_y: integer\n The size of the grid in the y direction\n n_z: integer\n The size of the grid in the z direction\n\n Returns\n -------\n edges : (2, N) ndarray\n with the total number of edges::\n\n N = n_x * n_y * (nz - 1) +\n n_x * (n_y - 1) * nz +\n (n_x - 1) * n_y * nz\n\n Graph edges with each column describing a node-id pair.\n \"\"\"\n vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))\n edges_deep = np.vstack((vertices[:, :, :-1].ravel(),\n vertices[:, :, 1:].ravel()))\n edges_right = np.vstack((vertices[:, :-1].ravel(),\n vertices[:, 1:].ravel()))\n edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))\n edges = np.hstack((edges_deep, edges_right, edges_down))\n return edges\n\n\ndef _compute_weights_3d(data, spacing, beta=130, eps=1.e-6,\n multichannel=False):\n # Weight calculation is main difference in multispectral version\n # Original gradient**2 replaced with sum of gradients ** 2\n gradients = 0\n for channel in range(0, data.shape[-1]):\n gradients += _compute_gradients_3d(data[..., channel],\n spacing) ** 2\n # All channels considered together in this standard deviation\n beta /= 10 * data.std()\n if multichannel:\n # New final term in beta to give == results in trivial case where\n # multiple identical spectra are passed.\n beta /= np.sqrt(data.shape[-1])\n gradients *= beta\n weights = np.exp(- gradients)\n weights += eps\n return weights\n\n\ndef _compute_gradients_3d(data, spacing):\n gr_deep = np.abs(data[:, :, :-1] - data[:, :, 1:]).ravel() / spacing[2]\n gr_right = np.abs(data[:, :-1] - data[:, 1:]).ravel() / spacing[1]\n gr_down = np.abs(data[:-1] - data[1:]).ravel() / spacing[0]\n return np.r_[gr_deep, gr_right, gr_down]\n\n\ndef _make_laplacian_sparse(edges, weights):\n \"\"\"\n Sparse implementation\n \"\"\"\n pixel_nb = edges.max() + 1\n diag = np.arange(pixel_nb)\n i_indices = np.hstack((edges[0], edges[1]))\n j_indices = np.hstack((edges[1], edges[0]))\n data = np.hstack((-weights, -weights))\n lap 
= sparse.coo_matrix((data, (i_indices, j_indices)),\n shape=(pixel_nb, pixel_nb))\n connect = - np.ravel(lap.sum(axis=1))\n lap = sparse.coo_matrix(\n (np.hstack((data, connect)), (np.hstack((i_indices, diag)),\n np.hstack((j_indices, diag)))),\n shape=(pixel_nb, pixel_nb))\n return lap.tocsr()\n\n\ndef _clean_labels_ar(X, labels, copy=False):\n X = X.astype(labels.dtype)\n if copy:\n labels = np.copy(labels)\n labels = np.ravel(labels)\n labels[labels == 0] = X\n return labels\n\n\ndef _buildAB(lap_sparse, labels):\n \"\"\"\n Build the matrix A and rhs B of the linear system to solve.\n A and B are two block of the laplacian of the image graph.\n \"\"\"\n labels = labels[labels >= 0]\n indices = np.arange(labels.size)\n unlabeled_indices = indices[labels == 0]\n seeds_indices = indices[labels > 0]\n # The following two lines take most of the time in this function\n B = lap_sparse[unlabeled_indices][:, seeds_indices]\n lap_sparse = lap_sparse[unlabeled_indices][:, unlabeled_indices]\n nlabels = labels.max()\n rhs = []\n for lab in range(1, nlabels + 1):\n mask = (labels[seeds_indices] == lab)\n fs = sparse.csr_matrix(mask)\n fs = fs.transpose()\n rhs.append(B * fs)\n return lap_sparse, rhs\n\n\ndef _mask_edges_weights(edges, weights, mask):\n \"\"\"\n Remove edges of the graph connected to masked nodes, as well as\n corresponding weights of the edges.\n \"\"\"\n mask0 = np.hstack((mask[:, :, :-1].ravel(), mask[:, :-1].ravel(),\n mask[:-1].ravel()))\n mask1 = np.hstack((mask[:, :, 1:].ravel(), mask[:, 1:].ravel(),\n mask[1:].ravel()))\n ind_mask = np.logical_and(mask0, mask1)\n edges, weights = edges[:, ind_mask], weights[ind_mask]\n max_node_index = edges.max()\n # Reassign edges labels to 0, 1, ... edges_number - 1\n order = np.searchsorted(np.unique(edges.ravel()),\n np.arange(max_node_index + 1))\n edges = order[edges.astype(np.int64)]\n return edges, weights\n\n\ndef _build_laplacian(data, spacing, mask=None, beta=50,\n multichannel=False):\n l_x, l_y, l_z = tuple(data.shape[i] for i in range(3))\n edges = _make_graph_edges_3d(l_x, l_y, l_z)\n weights = _compute_weights_3d(data, spacing, beta=beta, eps=1.e-10,\n multichannel=multichannel)\n if mask is not None:\n edges, weights = _mask_edges_weights(edges, weights, mask)\n lap = _make_laplacian_sparse(edges, weights)\n del edges, weights\n return lap\n\n\ndef _check_isolated_seeds(labels):\n \"\"\"\n Prune isolated seed pixels to prevent labeling errors, and\n return coordinates and label values of isolated seeds, so\n that it is possible to put labels back in random walker output.\n \"\"\"\n fill = ndi.binary_propagation(labels == 0, mask=(labels >= 0))\n isolated = np.logical_and(labels > 0, np.logical_not(fill))\n inds = np.nonzero(isolated)\n values = labels[inds]\n labels[inds] = -1\n return inds, values\n\n\n#----------- Random walker algorithm --------------------------------\n\n\ndef random_walker(data, labels, beta=130, mode='bf', tol=1.e-3, copy=True,\n multichannel=False, return_full_prob=False, spacing=None):\n \"\"\"Random walker algorithm for segmentation from markers.\n\n Random walker algorithm is implemented for gray-level or multichannel\n images.\n\n Parameters\n ----------\n data : array_like\n Image to be segmented in phases. Gray-level `data` can be two- or\n three-dimensional; multichannel data can be three- or four-\n dimensional (multichannel=True) with the highest dimension denoting\n channels. 
Data spacing is assumed isotropic unless the `spacing`\n keyword argument is used.\n labels : array of ints, of same shape as `data` without channels dimension\n Array of seed markers labeled with different positive integers\n for different phases. Zero-labeled pixels are unlabeled pixels.\n Negative labels correspond to inactive pixels that are not taken\n into account (they are removed from the graph). If labels are not\n consecutive integers, the labels array will be transformed so that\n labels are consecutive. In the multichannel case, `labels` should have\n the same shape as a single channel of `data`, i.e. without the final\n dimension denoting channels.\n beta : float\n Penalization coefficient for the random walker motion\n (the greater `beta`, the more difficult the diffusion).\n mode : string, available options {'cg_mg', 'cg', 'bf'}\n Mode for solving the linear system in the random walker algorithm.\n If no preference given, automatically attempt to use the fastest\n option available ('cg_mg' from pyamg >> 'cg' with UMFPACK > 'bf').\n\n - 'bf' (brute force): an LU factorization of the Laplacian is\n computed. This is fast for small images (<1024x1024), but very slow\n and memory-intensive for large images (e.g., 3-D volumes).\n - 'cg' (conjugate gradient): the linear system is solved iteratively\n using the Conjugate Gradient method from scipy.sparse.linalg. This is\n less memory-consuming than the brute force method for large images,\n but it is quite slow.\n - 'cg_mg' (conjugate gradient with multigrid preconditioner): a\n preconditioner is computed using a multigrid solver, then the\n solution is computed with the Conjugate Gradient method. This mode\n requires that the pyamg module (http://pyamg.org/) is\n installed. For images of size > 512x512, this is the recommended\n (fastest) mode.\n\n tol : float\n tolerance to achieve when solving the linear system, in\n cg' and 'cg_mg' modes.\n copy : bool\n If copy is False, the `labels` array will be overwritten with\n the result of the segmentation. Use copy=False if you want to\n save on memory.\n multichannel : bool, default False\n If True, input data is parsed as multichannel data (see 'data' above\n for proper input format in this case)\n return_full_prob : bool, default False\n If True, the probability that a pixel belongs to each of the labels\n will be returned, instead of only the most likely label.\n spacing : iterable of floats\n Spacing between voxels in each spatial dimension. If `None`, then\n the spacing between pixels/voxels in each dimension is assumed 1.\n\n Returns\n -------\n output : ndarray\n * If `return_full_prob` is False, array of ints of same shape as\n `data`, in which each pixel has been labeled according to the marker\n that reached the pixel first by anisotropic diffusion.\n * If `return_full_prob` is True, array of floats of shape\n `(nlabels, data.shape)`. `output[label_nb, i, j]` is the probability\n that label `label_nb` reaches the pixel `(i, j)` first.\n\n See also\n --------\n skimage.morphology.watershed: watershed segmentation\n A segmentation algorithm based on mathematical morphology\n and \"flooding\" of regions from markers.\n\n Notes\n -----\n Multichannel inputs are scaled with all channel data combined. 
Ensure all\n channels are separately normalized prior to running this algorithm.\n\n The `spacing` argument is specifically for anisotropic datasets, where\n data points are spaced differently in one or more spatial dimensions.\n Anisotropic data is commonly encountered in medical imaging.\n\n The algorithm was first proposed in *Random walks for image\n segmentation*, Leo Grady, IEEE Trans Pattern Anal Mach Intell.\n 2006 Nov;28(11):1768-83.\n\n The algorithm solves the diffusion equation at infinite times for\n sources placed on markers of each phase in turn. A pixel is labeled with\n the phase that has the greatest probability to diffuse first to the pixel.\n\n The diffusion equation is solved by minimizing x.T L x for each phase,\n where L is the Laplacian of the weighted graph of the image, and x is\n the probability that a marker of the given phase arrives first at a pixel\n by diffusion (x=1 on markers of the phase, x=0 on the other markers, and\n the other coefficients are looked for). Each pixel is attributed the label\n for which it has a maximal value of x. The Laplacian L of the image\n is defined as:\n\n - L_ii = d_i, the number of neighbors of pixel i (the degree of i)\n - L_ij = -w_ij if i and j are adjacent pixels\n\n The weight w_ij is a decreasing function of the norm of the local gradient.\n This ensures that diffusion is easier between pixels of similar values.\n\n When the Laplacian is decomposed into blocks of marked and unmarked\n pixels::\n\n L = M B.T\n B A\n\n with first indices corresponding to marked pixels, and then to unmarked\n pixels, minimizing x.T L x for one phase amount to solving::\n\n A x = - B x_m\n\n where x_m = 1 on markers of the given phase, and 0 on other markers.\n This linear system is solved in the algorithm using a direct method for\n small images, and an iterative method for larger images.\n\n Examples\n --------\n >>> np.random.seed(0)\n >>> a = np.zeros((10, 10)) + 0.2 * np.random.rand(10, 10)\n >>> a[5:8, 5:8] += 1\n >>> b = np.zeros_like(a)\n >>> b[3, 3] = 1 # Marker for first phase\n >>> b[6, 6] = 2 # Marker for second phase\n >>> random_walker(a, b)\n array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],\n [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],\n [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=int32)\n\n \"\"\"\n # Parse input data\n if mode is None:\n if amg_loaded:\n mode = 'cg_mg'\n elif UmfpackContext is not None:\n mode = 'cg'\n else:\n mode = 'bf'\n elif mode not in ('cg_mg', 'cg', 'bf'):\n raise ValueError(\"{mode} is not a valid mode. Valid modes are 'cg_mg',\"\n \" 'cg' and 'bf'\".format(mode=mode))\n\n if UmfpackContext is None and mode == 'cg':\n warn('\"cg\" mode will be used, but it may be slower than '\n '\"bf\" because SciPy was built without UMFPACK. Consider'\n ' rebuilding SciPy with UMFPACK; this will greatly '\n 'accelerate the conjugate gradient (\"cg\") solver. '\n 'You may also install pyamg and run the random_walker '\n 'function in \"cg_mg\" mode (see docstring).')\n\n if (labels != 0).all():\n warn('Random walker only segments unlabeled areas, where '\n 'labels == 0. No zero valued areas in labels were '\n 'found. 
Returning provided labels.')\n\n if return_full_prob:\n # Find and iterate over valid labels\n unique_labels = np.unique(labels)\n unique_labels = unique_labels[unique_labels > 0]\n\n out_labels = np.empty(labels.shape + (len(unique_labels),),\n dtype=np.bool)\n for n, i in enumerate(unique_labels):\n out_labels[..., n] = (labels == i)\n\n else:\n out_labels = labels\n return out_labels\n\n # This algorithm expects 4-D arrays of floats, where the first three\n # dimensions are spatial and the final denotes channels. 2-D images have\n # a singleton placeholder dimension added for the third spatial dimension,\n # and single channel images likewise have a singleton added for channels.\n # The following block ensures valid input and coerces it to the correct\n # form.\n if not multichannel:\n if data.ndim < 2 or data.ndim > 3:\n raise ValueError('For non-multichannel input, data must be of '\n 'dimension 2 or 3.')\n dims = data.shape # To reshape final labeled result\n data = np.atleast_3d(img_as_float(data))[..., np.newaxis]\n else:\n if data.ndim < 3:\n raise ValueError('For multichannel input, data must have 3 or 4 '\n 'dimensions.')\n dims = data[..., 0].shape # To reshape final labeled result\n data = img_as_float(data)\n if data.ndim == 3: # 2D multispectral, needs singleton in 3rd axis\n data = data[:, :, np.newaxis, :]\n\n # Spacing kwarg checks\n if spacing is None:\n spacing = np.asarray((1.,) * 3)\n elif len(spacing) == len(dims):\n if len(spacing) == 2: # Need a dummy spacing for singleton 3rd dim\n spacing = np.r_[spacing, 1.]\n else: # Convert to array\n spacing = np.asarray(spacing)\n else:\n raise ValueError('Input argument `spacing` incorrect, should be an '\n 'iterable with one number per spatial dimension.')\n\n if copy:\n labels = np.copy(labels)\n label_values = np.unique(labels)\n\n # If some labeled pixels are isolated inside pruned zones, prune them\n # as well and keep the labels for the final output\n inds_isolated_seeds, isolated_values = _check_isolated_seeds(labels)\n\n # Reorder label values to have consecutive integers (no gaps)\n if np.any(np.diff(label_values) != 1):\n mask = labels >= 0\n labels[mask] = rank_order(labels[mask])[0].astype(labels.dtype)\n labels = labels.astype(np.int32)\n\n # If the array has pruned zones, be sure that no isolated pixels\n # exist between pruned zones (they could not be determined)\n if np.any(labels < 0):\n filled = ndi.binary_propagation(labels > 0, mask=labels >= 0)\n labels[np.logical_and(np.logical_not(filled), labels == 0)] = -1\n del filled\n labels = np.atleast_3d(labels)\n\n\n if np.any(labels < 0):\n lap_sparse = _build_laplacian(data, spacing, mask=labels >= 0,\n beta=beta, multichannel=multichannel)\n else:\n lap_sparse = _build_laplacian(data, spacing, beta=beta,\n multichannel=multichannel)\n lap_sparse, B = _buildAB(lap_sparse, labels)\n\n # We solve the linear system\n # lap_sparse X = B\n # where X[i, j] is the probability that a marker of label i arrives\n # first at pixel j by anisotropic diffusion.\n if mode == 'cg':\n X = _solve_cg(lap_sparse, B, tol=tol,\n return_full_prob=return_full_prob)\n if mode == 'cg_mg':\n if not amg_loaded:\n warn(\"\"\"pyamg (http://pyamg.org/)) is needed to use\n this mode, but is not installed. 
The 'cg' mode will be used\n instead.\"\"\")\n X = _solve_cg(lap_sparse, B, tol=tol,\n return_full_prob=return_full_prob)\n else:\n X = _solve_cg_mg(lap_sparse, B, tol=tol,\n return_full_prob=return_full_prob)\n if mode == 'bf':\n X = _solve_bf(lap_sparse, B,\n return_full_prob=return_full_prob)\n\n # Clean up results\n if return_full_prob:\n labels = labels.astype(np.float)\n # Put back labels of isolated seeds\n if len(isolated_values) > 0:\n labels[inds_isolated_seeds] = isolated_values\n X = np.array([_clean_labels_ar(Xline, labels, copy=True).reshape(dims)\n for Xline in X])\n for i in range(1, int(labels.max()) + 1):\n mask_i = np.squeeze(labels == i)\n X[:, mask_i] = 0\n X[i - 1, mask_i] = 1\n else:\n X = _clean_labels_ar(X + 1, labels).reshape(dims)\n # Put back labels of isolated seeds\n X[inds_isolated_seeds] = isolated_values\n return X\n\n\ndef _solve_bf(lap_sparse, B, return_full_prob=False):\n \"\"\"\n solves lap_sparse X_i = B_i for each phase i. An LU decomposition\n of lap_sparse is computed first. For each pixel, the label i\n corresponding to the maximal X_i is returned.\n \"\"\"\n lap_sparse = lap_sparse.tocsc()\n solver = sparse.linalg.factorized(lap_sparse.astype(np.double))\n X = np.array([solver(np.array((-B[i]).toarray()).ravel())\n for i in range(len(B))])\n if not return_full_prob:\n X = np.argmax(X, axis=0)\n return X\n\n\ndef _solve_cg(lap_sparse, B, tol, return_full_prob=False):\n \"\"\"\n solves lap_sparse X_i = B_i for each phase i, using the conjugate\n gradient method. For each pixel, the label i corresponding to the\n maximal X_i is returned.\n \"\"\"\n lap_sparse = lap_sparse.tocsc()\n X = []\n for i in range(len(B)):\n x0 = cg(lap_sparse, -B[i].toarray(), tol=tol)[0]\n X.append(x0)\n if not return_full_prob:\n X = np.array(X)\n X = np.argmax(X, axis=0)\n return X\n\n\ndef _solve_cg_mg(lap_sparse, B, tol, return_full_prob=False):\n \"\"\"\n solves lap_sparse X_i = B_i for each phase i, using the conjugate\n gradient method with a multigrid preconditioner (ruge-stuben from\n pyamg). 
For each pixel, the label i corresponding to the maximal\n X_i is returned.\n \"\"\"\n X = []\n ml = ruge_stuben_solver(lap_sparse)\n M = ml.aspreconditioner(cycle='V')\n for i in range(len(B)):\n x0 = cg(lap_sparse, -B[i].toarray(), tol=tol, M=M, maxiter=30)[0]\n X.append(x0)\n if not return_full_prob:\n X = np.array(X)\n X = np.argmax(X, axis=0)\n return X\n", "\nimport numpy as np\nfrom skimage.restoration import inpaint\n\nfrom skimage._shared import testing\nfrom skimage._shared.testing import assert_allclose\n\n\ndef test_inpaint_biharmonic_2d():\n img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))\n mask = np.zeros_like(img)\n mask[2, 2:] = 1\n mask[1, 3:] = 1\n mask[0, 4:] = 1\n img[np.where(mask)] = 0\n out = inpaint.inpaint_biharmonic(img, mask)\n ref = np.array(\n [[0., 0.0625, 0.25000000, 0.5625000, 0.73925058],\n [0., 0.0625, 0.25000000, 0.5478048, 0.76557821],\n [0., 0.0625, 0.25842878, 0.5623079, 0.85927796],\n [0., 0.0625, 0.25000000, 0.5625000, 1.00000000],\n [0., 0.0625, 0.25000000, 0.5625000, 1.00000000]]\n )\n assert_allclose(ref, out)\n\n\ndef test_inpaint_biharmonic_3d():\n img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))\n img = np.dstack((img, img.T))\n mask = np.zeros_like(img)\n mask[2, 2:, :] = 1\n mask[1, 3:, :] = 1\n mask[0, 4:, :] = 1\n img[np.where(mask)] = 0\n out = inpaint.inpaint_biharmonic(img, mask)\n ref = np.dstack((\n np.array(\n [[0.0000, 0.0625, 0.25000000, 0.56250000, 0.53752796],\n [0.0000, 0.0625, 0.25000000, 0.44443780, 0.53762210],\n [0.0000, 0.0625, 0.23693666, 0.46621112, 0.68615592],\n [0.0000, 0.0625, 0.25000000, 0.56250000, 1.00000000],\n [0.0000, 0.0625, 0.25000000, 0.56250000, 1.00000000]]),\n np.array(\n [[0.0000, 0.0000, 0.00000000, 0.00000000, 0.19621902],\n [0.0625, 0.0625, 0.06250000, 0.17470756, 0.30140091],\n [0.2500, 0.2500, 0.27241289, 0.35155440, 0.43068654],\n [0.5625, 0.5625, 0.56250000, 0.56250000, 0.56250000],\n [1.0000, 1.0000, 1.00000000, 1.00000000, 1.00000000]])\n ))\n assert_allclose(ref, out)\n\n\ndef test_invalid_input():\n img, mask = np.zeros([]), np.zeros([])\n with testing.raises(ValueError):\n inpaint.inpaint_biharmonic(img, mask)\n\n img, mask = np.zeros((2, 2)), np.zeros((4, 1))\n with testing.raises(ValueError):\n inpaint.inpaint_biharmonic(img, mask)\n\n img = np.ma.array(np.zeros((2, 2)), mask=[[0, 0], [0, 0]])\n mask = np.zeros((2, 2))\n with testing.raises(TypeError):\n inpaint.inpaint_biharmonic(img, mask)\n", "\"\"\"\n=====================\nImage Deconvolution\n=====================\n\nIn this example, we deconvolve a noisy version of an image using Wiener\nand unsupervised Wiener algorithms. This algorithms are based on\nlinear models that can't restore sharp edge as much as non-linear\nmethods (like TV restoration) but are much faster.\n\nWiener filter\n-------------\nThe inverse filter based on the PSF (Point Spread Function),\nthe prior regularisation (penalisation of high frequency) and the\ntradeoff between the data and prior adequacy. The regularization\nparameter must be hand tuned.\n\nUnsupervised Wiener\n-------------------\nThis algorithm has a self-tuned regularisation parameters based on\ndata learning. This is not common and based on the following\npublication [1]_. The algorithm is based on a iterative Gibbs sampler that\ndraw alternatively samples of posterior conditional law of the image,\nthe noise power and the image frequency power.\n\n.. 
[1] François Orieux, Jean-François Giovannelli, and Thomas\n Rodet, \"Bayesian estimation of regularization and point\n spread function parameters for Wiener-Hunt deconvolution\",\n J. Opt. Soc. Am. A 27, 1593-1607 (2010)\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import color, data, restoration\n\nastro = color.rgb2gray(data.astronaut())\nfrom scipy.signal import convolve2d as conv2\npsf = np.ones((5, 5)) / 25\nastro = conv2(astro, psf, 'same')\nastro += 0.1 * astro.std() * np.random.standard_normal(astro.shape)\n\ndeconvolved, _ = restoration.unsupervised_wiener(astro, psf)\n\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 5),\n sharex=True, sharey=True)\n\nplt.gray()\n\nax[0].imshow(astro, vmin=deconvolved.min(), vmax=deconvolved.max())\nax[0].axis('off')\nax[0].set_title('Data')\n\nax[1].imshow(deconvolved)\nax[1].axis('off')\nax[1].set_title('Self tuned restoration')\n\nfig.tight_layout()\n\nplt.show()\n", "import numpy as np\nfrom skimage.segmentation import find_boundaries, mark_boundaries\n\nfrom skimage._shared.testing import assert_array_equal, assert_allclose\n\n\nwhite = (1, 1, 1)\n\n\ndef test_find_boundaries():\n image = np.zeros((10, 10), dtype=np.uint8)\n image[2:7, 2:7] = 1\n\n ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n\n result = find_boundaries(image)\n assert_array_equal(result, ref)\n\n\ndef test_find_boundaries_bool():\n image = np.zeros((5, 5), dtype=np.bool)\n image[2:5, 2:5] = True\n\n ref = np.array([[False, False, False, False, False],\n [False, False, True, True, True],\n [False, True, True, True, True],\n [False, True, True, False, False],\n [False, True, True, False, False]], dtype=np.bool)\n result = find_boundaries(image)\n assert_array_equal(result, ref)\n\n\ndef test_mark_boundaries():\n image = np.zeros((10, 10))\n label_image = np.zeros((10, 10), dtype=np.uint8)\n label_image[2:7, 2:7] = 1\n\n ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n\n marked = mark_boundaries(image, label_image, color=white, mode='thick')\n result = np.mean(marked, axis=-1)\n assert_array_equal(result, ref)\n\n ref = np.array([[0, 2, 2, 2, 2, 2, 2, 2, 0, 0],\n [2, 2, 1, 1, 1, 1, 1, 2, 2, 0],\n [2, 1, 1, 1, 1, 1, 1, 1, 2, 0],\n [2, 1, 1, 2, 2, 2, 1, 1, 2, 0],\n [2, 1, 1, 2, 0, 2, 1, 1, 2, 0],\n [2, 1, 1, 2, 2, 2, 1, 1, 2, 0],\n [2, 1, 1, 1, 1, 1, 1, 1, 2, 0],\n [2, 2, 1, 1, 1, 1, 1, 2, 2, 0],\n [0, 2, 2, 2, 2, 2, 2, 2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n marked = mark_boundaries(image, label_image, color=white,\n outline_color=(2, 2, 2), mode='thick')\n result = np.mean(marked, axis=-1)\n assert_array_equal(result, ref)\n\n\ndef test_mark_boundaries_bool():\n image = np.zeros((10, 10), dtype=np.bool)\n label_image = np.zeros((10, 10), dtype=np.uint8)\n label_image[2:7, 2:7] = 1\n\n ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 1, 
0, 0],\n [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n\n marked = mark_boundaries(image, label_image, color=white, mode='thick')\n result = np.mean(marked, axis=-1)\n assert_array_equal(result, ref)\n\n\ndef test_mark_boundaries_subpixel():\n labels = np.array([[0, 0, 0, 0],\n [0, 0, 5, 0],\n [0, 1, 5, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 0]], dtype=np.uint8)\n np.random.seed(0)\n image = np.round(np.random.rand(*labels.shape), 2)\n marked = mark_boundaries(image, labels, color=white, mode='subpixel')\n marked_proj = np.round(np.mean(marked, axis=-1), 2)\n\n ref_result = np.array(\n [[ 0.55, 0.63, 0.72, 0.69, 0.6 , 0.55, 0.54],\n [ 0.45, 0.58, 0.72, 1. , 1. , 1. , 0.69],\n [ 0.42, 0.54, 0.65, 1. , 0.44, 1. , 0.89],\n [ 0.69, 1. , 1. , 1. , 0.69, 1. , 0.83],\n [ 0.96, 1. , 0.38, 1. , 0.79, 1. , 0.53],\n [ 0.89, 1. , 1. , 1. , 0.38, 1. , 0.16],\n [ 0.57, 0.78, 0.93, 1. , 0.07, 1. , 0.09],\n [ 0.2 , 0.52, 0.92, 1. , 1. , 1. , 0.54],\n [ 0.02, 0.35, 0.83, 0.9 , 0.78, 0.81, 0.87]])\n assert_allclose(marked_proj, ref_result, atol=0.01)\n", "import numpy as np\nfrom skimage import dtype_limits\nfrom skimage.util.dtype import dtype_range\nfrom skimage.util import invert\n\nfrom skimage._shared.testing import assert_array_equal\n\n\ndef test_invert_bool():\n dtype = 'bool'\n image = np.zeros((3, 3), dtype=dtype)\n upper_dtype_limit = dtype_limits(image, clip_negative=False)[1]\n image[1, :] = upper_dtype_limit\n expected = np.zeros((3, 3), dtype=dtype) + upper_dtype_limit\n expected[1, :] = 0\n result = invert(image)\n assert_array_equal(expected, result)\n\n\ndef test_invert_uint8():\n dtype = 'uint8'\n image = np.zeros((3, 3), dtype=dtype)\n upper_dtype_limit = dtype_limits(image, clip_negative=False)[1]\n image[1, :] = upper_dtype_limit\n expected = np.zeros((3, 3), dtype=dtype) + upper_dtype_limit\n expected[1, :] = 0\n result = invert(image)\n assert_array_equal(expected, result)\n\n\ndef test_invert_int8():\n dtype = 'int8'\n image = np.zeros((3, 3), dtype=dtype)\n lower_dtype_limit, upper_dtype_limit = \\\n dtype_limits(image, clip_negative=False)\n image[1, :] = lower_dtype_limit\n image[2, :] = upper_dtype_limit\n expected = np.zeros((3, 3), dtype=dtype)\n expected[2, :] = lower_dtype_limit\n expected[1, :] = upper_dtype_limit\n expected[0, :] = -1\n result = invert(image)\n assert_array_equal(expected, result)\n\n\ndef test_invert_float64_signed():\n dtype = 'float64'\n image = np.zeros((3, 3), dtype=dtype)\n lower_dtype_limit, upper_dtype_limit = \\\n dtype_limits(image, clip_negative=False)\n image[1, :] = lower_dtype_limit\n image[2, :] = upper_dtype_limit\n expected = np.zeros((3, 3), dtype=dtype)\n expected[2, :] = lower_dtype_limit\n expected[1, :] = upper_dtype_limit\n result = invert(image, signed_float=True)\n assert_array_equal(expected, result)\n\n\ndef test_invert_float64_unsigned():\n dtype = 'float64'\n image = np.zeros((3, 3), dtype=dtype)\n lower_dtype_limit, upper_dtype_limit = \\\n dtype_limits(image, clip_negative=True)\n image[2, :] = upper_dtype_limit\n expected = np.zeros((3, 3), dtype=dtype)\n expected[0, :] = upper_dtype_limit\n expected[1, :] = upper_dtype_limit\n result = invert(image)\n assert_array_equal(expected, result)\n\n\ndef test_invert_roundtrip():\n for t, limits in dtype_range.items():\n image = np.array(limits, dtype=t)\n expected = invert(invert(image))\n assert_array_equal(image, expected)\n", 
"from skimage._shared.utils import (copy_func, assert_nD)\nimport numpy.testing as npt\nimport numpy as np\nfrom skimage._shared import testing\n\n\ndef test_assert_nD():\n z = np.random.random(200**2).reshape((200, 200))\n x = z[10:30, 30:10]\n with testing.raises(ValueError):\n assert_nD(x, 2)\n\n\ndef test_copyfunc():\n def foo(a):\n return a\n\n bar = copy_func(foo, name='bar')\n other = copy_func(foo)\n\n npt.assert_equal(bar.__name__, 'bar')\n npt.assert_equal(other.__name__, 'foo')\n\n other.__name__ = 'other'\n\n npt.assert_equal(foo.__name__, 'foo')\n\n\nif __name__ == \"__main__\":\n npt.run_module_suite()\n" ]
[ [ "numpy.random.random", "numpy.random.seed", "numpy.arange", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.max", "numpy.copy", "numpy.concatenate", "numpy.zeros_like", "numpy.random.normal", "numpy.random.randn", "numpy.zeros" ], [ "numpy.log", "numpy.abs", "numpy.conj", "numpy.sqrt", "numpy.random.standard_normal", "numpy.ones", "numpy.prod", "numpy.iscomplexobj", "numpy.zeros" ], [ "scipy.ndimage.maximum_filter1d", "numpy.nonzero", "numpy.unique", "numpy.logical_and", "numpy.transpose", "numpy.all", "scipy.ndimage.maximum_filter", "numpy.max", "numpy.round", "numpy.zeros_like", "numpy.diff", "numpy.column_stack", "numpy.argsort", "numpy.array", "numpy.empty" ], [ "numpy.fft.fftn", "numpy.ones", "numpy.fft.ifftn", "scipy.ndimage.fourier_shift", "numpy.array" ], [ "numpy.array", "numpy.zeros", "numpy.loadtxt" ], [ "numpy.sqrt", "numpy.asarray", "numpy.squeeze", "numpy.any", "numpy.exp", "numpy.hstack", "scipy.sparse.coo_matrix", "numpy.unique", "numpy.arange", "scipy.ndimage.binary_propagation", "numpy.copy", "numpy.argmax", "numpy.diff", "numpy.ravel", "numpy.logical_not", "numpy.nonzero", "scipy.sparse.csr_matrix", "numpy.logical_and", "numpy.array", "scipy.sparse.linalg.dsolve.umfpack.UmfpackContext", "numpy.abs", "numpy.atleast_3d" ], [ "numpy.linspace", "numpy.dstack", "numpy.zeros_like", "numpy.array", "numpy.zeros", "numpy.where" ], [ "matplotlib.pyplot.gray", "numpy.random.standard_normal", "matplotlib.pyplot.subplots", "scipy.signal.convolve2d", "numpy.ones", "matplotlib.pyplot.show" ], [ "numpy.random.seed", "numpy.mean", "numpy.random.rand", "numpy.array", "numpy.zeros" ], [ "numpy.array", "numpy.zeros" ], [ "numpy.testing.assert_equal", "numpy.testing.run_module_suite", "numpy.random.random" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JHuang-CV/OD
[ "290bf90a5f210199b6a3750c88152f7dd2fbc276" ]
[ "mmdet/models/necks/mscatfpn.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import xavier_init\n\nfrom mmdet.core import auto_fp16\nfrom ..registry import NECKS\nfrom ..utils import ConvModule\nfrom mmdet.ops.context_block import ContextBlock\n\nfrom mmdet.models.plugins.squeeze_excitation import ChannelSELayer\n\n\[email protected]_module\nclass MSCATFPN(nn.Module):\n\n def __init__(self,\n in_channels,\n out_channels,\n num_outs,\n start_level=0,\n end_level=-1,\n add_extra_convs=False,\n extra_convs_on_inputs=True,\n relu_before_extra_convs=False,\n no_norm_on_lateral=False,\n conv_cfg=None,\n norm_cfg=None,\n activation=None):\n super(MSCATFPN, self).__init__()\n assert isinstance(in_channels, list)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_ins = len(in_channels)\n self.num_outs = num_outs\n self.activation = activation\n self.relu_before_extra_convs = relu_before_extra_convs\n self.no_norm_on_lateral = no_norm_on_lateral\n self.norm_cfg = norm_cfg\n self.fp16_enabled = False\n\n self.epsilon = 1e-4\n\n self.se = ChannelSELayer(768)\n\n if end_level == -1:\n self.backbone_end_level = self.num_ins\n assert num_outs >= self.num_ins - start_level\n else:\n # if end_level < inputs, no extra level is allowed\n self.backbone_end_level = end_level\n assert end_level <= len(in_channels)\n assert num_outs == end_level - start_level\n self.start_level = start_level\n self.end_level = end_level\n self.add_extra_convs = add_extra_convs\n self.extra_convs_on_inputs = extra_convs_on_inputs\n\n self.lateral_convs = nn.ModuleList()\n self.fpn_convs = nn.ModuleList()\n self.cat_convs = nn.ModuleList()\n self.add_convs = nn.ModuleList()\n #self.gc_block = nn.ModuleList()\n\n self.relu = nn.ReLU()\n\n self.gc_block1 = ContextBlock(inplanes=256, ratio=1./4.)\n self.gc_block2 = ContextBlock(inplanes=256, ratio=1. 
/ 4.)\n\n self.scat_conv = ConvModule(\n out_channels * (self.backbone_end_level-self.start_level),\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n\n self.c3_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)\n self.c4_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)\n self.c5_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)\n\n for i in range(self.start_level, self.backbone_end_level):\n l_conv = ConvModule(\n in_channels[i],\n out_channels,\n 1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n activation=self.activation,\n inplace=False)\n cat_conv = ConvModule(\n out_channels * (self.backbone_end_level-self.start_level),\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n add_conv = ConvModule(\n out_channels,\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n self.cat_convs.append(cat_conv)\n self.lateral_convs.append(l_conv)\n self.add_convs.append(add_conv)\n\n #self.gc_block.append(ContextBlock(inplanes=256, ratio=1./4.))\n # add extra conv layers (e.g., RetinaNet)\n extra_levels = num_outs - self.backbone_end_level + self.start_level\n if add_extra_convs and extra_levels >= 1:\n for i in range(extra_levels):\n if i == 0 and self.extra_convs_on_inputs:\n in_channels = self.in_channels[self.backbone_end_level - 1]\n else:\n in_channels = out_channels\n extra_fpn_conv = ConvModule(\n in_channels,\n out_channels,\n 3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n self.fpn_convs.append(extra_fpn_conv)\n\n # default init_weights for conv(msra) and norm in ConvModule\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n @auto_fp16()\n def forward(self, inputs):\n assert len(inputs) == len(self.in_channels)\n\n # build laterals\n laterals = [\n lateral_conv(inputs[i + self.start_level])\n for i, lateral_conv in enumerate(self.lateral_convs)\n ]\n\n used_backbone_levels = len(laterals)\n\n mulscale_per_level = []\n for i in range(used_backbone_levels):\n level = []\n m = i - 0\n n = used_backbone_levels - 1 - i\n level.append(laterals[i])\n for x in range(m):\n level.insert(0, F.interpolate(level[0], scale_factor=2, mode='nearest'))\n for y in range(n):\n level.append(F.max_pool2d(level[-1], 2, stride=2))\n mulscale_per_level.append(level)\n sglscale_per_level = list(zip(*mulscale_per_level))\n feat_cat = [torch.cat(scale, 1)for scale in sglscale_per_level]\n #channel_se = [self.se(cat_ft) for cat_ft in feat_cat]\n mcat = [cat_conv(feat_cat[i]) for i, cat_conv in enumerate(self.cat_convs)]\n #outs = [gc(outs[i]) for i, gc in enumerate(self.gc_block)]\n mcat = [self.gc_block1(ft) for ft in mcat]\n\n single_list = []\n level = used_backbone_levels // 2\n\n for i in range(used_backbone_levels):\n if i < level:\n single_list.append(F.max_pool2d(laterals[i], 2, stride=2))\n elif i == level:\n single_list.append(laterals[i])\n else:\n single_list.append(F.interpolate(laterals[i], scale_factor=2, mode='nearest'))\n\n single_cat = torch.cat(single_list, 1)\n single_cat = self.scat_conv(single_cat)\n single_cat = self.gc_block2(single_cat)\n\n m = level - 0\n n = used_backbone_levels - 1 - level\n scat = [single_cat]\n for x in 
range(m):\n scat.insert(0, F.interpolate(scat[0], scale_factor=2, mode='nearest'))\n for y in range(n):\n scat.append(F.max_pool2d(scat[-1], 2, stride=2))\n\n # outs = [scat[i]+lateral for i, lateral in enumerate(laterals)]\n # outs = [add_conv(outs[i]) for i, add_conv in enumerate(self.add_convs)]\n\n outs = []\n for i, (m, s, l) in enumerate(zip(mcat, scat, laterals)):\n outs.append(\n self.add_convs[i](m.sigmoid()*s/2 + l / 2)\n )\n\n if self.num_outs > used_backbone_levels:\n if not self.add_extra_convs:\n for i in range(self.num_outs - used_backbone_levels):\n outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n else:\n if self.extra_convs_on_inputs:\n orig = inputs[self.backbone_end_level - 1]\n outs.append(self.fpn_convs[0](orig))\n else:\n outs.append(self.fpn_convs[0](outs[-1]))\n for i in range(1, self.num_outs-used_backbone_levels):\n if self.relu_before_extra_convs:\n outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n else:\n outs.append(self.fpn_convs[i](outs[-1]))\n return tuple(outs)\n" ]
[ [ "torch.ones", "torch.cat", "torch.nn.ModuleList", "torch.nn.functional.relu", "torch.nn.functional.interpolate", "torch.nn.ReLU", "torch.nn.functional.max_pool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sjforeman/draco
[ "b0ab40b6984637642b28a5485af1c09c9cf183f2" ]
[ "draco/core/io.py" ]
[ "\"\"\"Tasks for reading and writing data.\n\nTasks\n=====\n\n.. autosummary::\n :toctree:\n\n LoadFiles\n LoadMaps\n LoadFilesFromParams\n Save\n Print\n LoadBeamTransfer\n\nFile Groups\n===========\n\nSeveral tasks accept groups of files as arguments. These are specified in the YAML file as a dictionary like below.\n\n.. code-block:: yaml\n\n list_of_file_groups:\n - tag: first_group # An optional tag naming the group\n files:\n - 'file1.h5'\n - 'file[3-4].h5' # Globs are processed\n - 'file7.h5'\n\n - files: # No tag specified, implicitly gets the tag 'group_2'\n - 'another_file1.h5'\n - 'another_file2.h5'\n\n\n single_group:\n files: ['file1.h5', 'file2.h5']\n\"\"\"\n\nimport os.path\n\nimport h5py\nimport numpy as np\nfrom yaml import dump as yamldump\n\nfrom caput import pipeline\nfrom caput import config\n\nfrom cora.util import units\n\nfrom . import task\nfrom ..util.truncate import bit_truncate_weights, bit_truncate_fixed\nfrom .containers import SiderealStream, TimeStream, TrackBeam\n\n\nTRUNC_SPEC = {\n SiderealStream: {\n \"dataset\": [\"vis\", \"vis_weight\"],\n \"weight_dataset\": [\"vis_weight\", None],\n \"fixed_precision\": 1e-4,\n \"variance_increase\": 1e-3,\n },\n TimeStream: {\n \"dataset\": [\"vis\", \"vis_weight\"],\n \"weight_dataset\": [\"vis_weight\", None],\n \"fixed_precision\": 1e-4,\n \"variance_increase\": 1e-3,\n },\n TrackBeam: {\n \"dataset\": [\"beam\", \"weight\"],\n \"weight_dataset\": [\"weight\", None],\n \"fixed_precision\": 1e-4,\n \"variance_increase\": 1e-3,\n },\n}\n\n\ndef _list_of_filelists(files):\n # Take in a list of lists/glob patterns of filenames\n import glob\n\n f2 = []\n\n for filelist in files:\n\n if isinstance(filelist, str):\n filelist = glob.glob(filelist)\n elif isinstance(filelist, list):\n pass\n else:\n raise Exception(\"Must be list or glob pattern.\")\n f2.append(filelist)\n\n return f2\n\n\ndef _list_or_glob(files):\n # Take in a list of lists/glob patterns of filenames\n import glob\n\n if isinstance(files, str):\n files = sorted(glob.glob(files))\n elif isinstance(files, list):\n pass\n else:\n raise ValueError(\"Argument must be list or glob pattern, got %s\" % repr(files))\n\n return files\n\n\ndef _list_of_filegroups(groups):\n # Process a file group/groups\n import glob\n\n # Convert to list if the group was not included in a list\n if not isinstance(groups, list):\n groups = [groups]\n\n # Iterate over groups, set the tag if needed, and process the file list\n # through glob\n for gi, group in enumerate(groups):\n\n files = group[\"files\"]\n\n if \"tag\" not in group:\n group[\"tag\"] = \"group_%i\" % gi\n\n flist = []\n\n for fname in files:\n flist += glob.glob(fname)\n\n if not len(flist):\n raise RuntimeError(\"No files in group exist (%s).\" % files)\n\n group[\"files\"] = flist\n\n return groups\n\n\nclass LoadMaps(task.MPILoggedTask):\n \"\"\"Load a series of maps from files given in the tasks parameters.\n\n Maps are given as one, or a list of `File Groups` (see\n :mod:`draco.core.io`). Maps within the same group are added together\n before being passed on.\n\n Attributes\n ----------\n maps : list or dict\n A dictionary specifying a file group, or a list of them.\n \"\"\"\n\n maps = config.Property(proptype=_list_of_filegroups)\n\n def next(self):\n \"\"\"Load the groups of maps from disk and pass them on.\n\n Returns\n -------\n map : :class:`containers.Map`\n \"\"\"\n\n from . 
import containers\n\n # Exit this task if we have eaten all the file groups\n if len(self.maps) == 0:\n raise pipeline.PipelineStopIteration\n\n group = self.maps.pop(0)\n\n map_stack = None\n\n # Iterate over all the files in the group, load them into a Map\n # container and add them all together\n for mfile in group[\"files\"]:\n\n self.log.debug(\"Loading file %s\", mfile)\n\n current_map = containers.Map.from_file(mfile, distributed=True)\n current_map.redistribute(\"freq\")\n\n # Start the stack if needed\n if map_stack is None:\n map_stack = current_map\n\n # Otherwise, check that the new map has consistent frequencies,\n # nside and pol and stack up.\n else:\n\n if (current_map.freq != map_stack.freq).all():\n raise RuntimeError(\"Maps do not have consistent frequencies.\")\n\n if (current_map.index_map[\"pol\"] != map_stack.index_map[\"pol\"]).all():\n raise RuntimeError(\"Maps do not have the same polarisations.\")\n\n if (\n current_map.index_map[\"pixel\"] != map_stack.index_map[\"pixel\"]\n ).all():\n raise RuntimeError(\"Maps do not have the same pixelisation.\")\n\n map_stack.map[:] += current_map.map[:]\n\n # Assign a tag to the stack of maps\n map_stack.attrs[\"tag\"] = group[\"tag\"]\n\n return map_stack\n\n\nclass LoadFITSCatalog(task.SingleTask):\n \"\"\"Load an SDSS-style FITS source catalog.\n\n Catalogs are given as one, or a list of `File Groups` (see\n :mod:`draco.core.io`). Catalogs within the same group are combined together\n before being passed on.\n\n Attributes\n ----------\n catalogs : list or dict\n A dictionary specifying a file group, or a list of them.\n z_range : list, optional\n Select only sources with a redshift within the given range.\n freq_range : list, optional\n Select only sources with a 21cm line freq within the given range. Overrides\n `z_range`.\n \"\"\"\n\n catalogs = config.Property(proptype=_list_of_filegroups)\n z_range = config.list_type(type_=float, length=2, default=None)\n freq_range = config.list_type(type_=float, length=2, default=None)\n\n def process(self):\n \"\"\"Load the groups of catalogs from disk, concatenate them and pass them on.\n\n Returns\n -------\n catalog : :class:`containers.SpectroscopicCatalog`\n \"\"\"\n\n from astropy.io import fits\n from . 
import containers\n\n # Exit this task if we have eaten all the file groups\n if len(self.catalogs) == 0:\n raise pipeline.PipelineStopIteration\n\n group = self.catalogs.pop(0)\n\n # Set the redshift selection\n if self.freq_range:\n zl = units.nu21 / self.freq_range[1] - 1\n zh = units.nu21 / self.freq_range[0] - 1\n self.z_range = (zl, zh)\n\n if self.z_range:\n zl, zh = self.z_range\n self.log.info(f\"Applying redshift selection {zl:.2f} <= z <= {zh:.2f}\")\n\n # Load the data only on rank=0 and then broadcast\n if self.comm.rank == 0:\n # Iterate over all the files in the group, load them into a Map\n # container and add them all together\n catalog_stack = []\n for cfile in group[\"files\"]:\n\n self.log.debug(\"Loading file %s\", cfile)\n\n # TODO: read out the weights from the catalogs\n with fits.open(cfile, mode=\"readonly\") as cat:\n pos = np.array([cat[1].data[col] for col in [\"RA\", \"DEC\", \"Z\"]])\n\n # Apply any redshift selection to the objects\n if self.z_range:\n zsel = (pos[2] >= self.z_range[0]) & (pos[2] <= self.z_range[1])\n pos = pos[:, zsel]\n\n catalog_stack.append(pos)\n\n # NOTE: this one is tricky, for some reason the concatenate in here\n # produces a non C contiguous array, so we need to ensure that otherwise\n # the broadcasting will get very confused\n catalog_array = np.concatenate(catalog_stack, axis=-1).astype(np.float64)\n catalog_array = np.ascontiguousarray(catalog_array)\n num_objects = catalog_array.shape[-1]\n else:\n num_objects = None\n catalog_array = None\n\n # Broadcast the size of the catalog to all ranks, create the target array and\n # broadcast into it\n num_objects = self.comm.bcast(num_objects, root=0)\n self.log.debug(f\"Constructing catalog with {num_objects} objects.\")\n\n if self.comm.rank != 0:\n catalog_array = np.zeros((3, num_objects), dtype=np.float64)\n self.comm.Bcast(catalog_array, root=0)\n\n catalog = containers.SpectroscopicCatalog(object_id=num_objects)\n catalog[\"position\"][\"ra\"] = catalog_array[0]\n catalog[\"position\"][\"dec\"] = catalog_array[1]\n catalog[\"redshift\"][\"z\"] = catalog_array[2]\n catalog[\"redshift\"][\"z_error\"] = 0\n\n # Assign a tag to the stack of maps\n catalog.attrs[\"tag\"] = group[\"tag\"]\n\n return catalog\n\n\nclass LoadFilesFromParams(task.SingleTask):\n \"\"\"Load data from files given in the tasks parameters.\n\n Attributes\n ----------\n files : glob pattern, or list\n Can either be a glob pattern, or lists of actual files.\n distributed : bool, optional\n Whether the file should be loaded distributed across ranks.\n convert_strings : bool, optional\n Convert strings to unicode when loading.\n selections : dict, optional\n A dictionary of axis selections. See the section below for details.\n\n Selections\n ----------\n Selections can be given to limit the data read to specified subsets. They can be\n given for any named axis in the container.\n\n Selections can be given as a slice with an `<axis name>_range` key with either\n `[start, stop]` or `[start, stop, step]` as the value. Alternatively a list of\n explicit indices to extract can be given with the `<axis name>_index` key, and\n the value is a list of the indices. If both `<axis name>_range` and `<axis\n name>_index` keys are given the former will take precedence, but you should\n clearly avoid doing this.\n\n Additionally index based selections currently don't work for distributed reads.\n\n Here's an example in the YAML format that the pipeline uses:\n\n .. 
code-block:: yaml\n\n selections:\n freq_range: [256, 512, 4] # A strided slice\n stack_index: [1, 2, 4, 9, 16, 25, 36, 49, 64] # A sparse selection\n stack_range: [1, 14] # Will override the selection above\n \"\"\"\n\n files = config.Property(proptype=_list_or_glob)\n distributed = config.Property(proptype=bool, default=True)\n convert_strings = config.Property(proptype=bool, default=True)\n selections = config.Property(proptype=dict, default=None)\n\n def setup(self):\n \"\"\"Resolve the selections.\"\"\"\n self._sel = self._resolve_sel()\n\n def process(self):\n \"\"\"Load the given files in turn and pass on.\n\n Returns\n -------\n cont : subclass of `memh5.BasicCont`\n \"\"\"\n\n from caput import memh5\n\n # Garbage collect to workaround leaking memory from containers.\n # TODO: find actual source of leak\n import gc\n\n gc.collect()\n\n if len(self.files) == 0:\n raise pipeline.PipelineStopIteration\n\n # Fetch and remove the first item in the list\n file_ = self.files.pop(0)\n\n self.log.info(f\"Loading file {file_}\")\n self.log.debug(f\"Reading with selections: {self._sel}\")\n\n # If we are applying selections we need to dispatch the `from_file` via the\n # correct subclass, rather than relying on the internal detection of the\n # subclass. To minimise the number of files being opened this is only done on\n # rank=0 and is then broadcast\n if self._sel:\n if self.comm.rank == 0:\n with h5py.File(file_, \"r\") as fh:\n clspath = memh5.MemDiskGroup._detect_subclass_path(fh)\n else:\n clspath = None\n clspath = self.comm.bcast(clspath, root=0)\n new_cls = memh5.MemDiskGroup._resolve_subclass(clspath)\n else:\n new_cls = memh5.BasicCont\n\n cont = new_cls.from_file(\n file_,\n distributed=self.distributed,\n comm=self.comm,\n convert_attribute_strings=self.convert_strings,\n convert_dataset_strings=self.convert_strings,\n **self._sel,\n )\n\n if \"tag\" not in cont.attrs:\n # Get the first part of the actual filename and use it as the tag\n tag = os.path.splitext(os.path.basename(file_))[0]\n\n cont.attrs[\"tag\"] = tag\n\n return cont\n\n def _resolve_sel(self):\n # Turn the selection parameters into actual selectable types\n\n sel = {}\n\n sel_parsers = {\"range\": self._parse_range, \"index\": self._parse_index}\n\n # To enforce the precedence of range vs index selections, we rely on the fact\n # that a sort will place the axis_range keys after axis_index keys\n for k in sorted(self.selections or []):\n\n # Parse the key to get the axis name and type, accounting for the fact the\n # axis name may contain an underscore\n *axis, type_ = k.split(\"_\")\n axis_name = \"_\".join(axis)\n\n if type_ not in sel_parsers:\n raise ValueError(\n f'Unsupported selection type \"{type_}\", or invalid key \"{k}\"'\n )\n\n sel[f\"{axis_name}_sel\"] = sel_parsers[type_](self.selections[k])\n\n return sel\n\n def _parse_range(self, x):\n # Parse and validate a range type selection\n\n if not isinstance(x, (list, tuple)) or len(x) > 3 or len(x) < 2:\n raise ValueError(\n f\"Range spec must be a length 2 or 3 list or tuple. Got {x}.\"\n )\n\n for v in x:\n if not isinstance(v, int):\n raise ValueError(f\"All elements of range spec must be ints. Got {x}\")\n\n return slice(*x)\n\n def _parse_index(self, x):\n # Parse and validate an index type selection\n\n if not isinstance(x, (list, tuple)) or len(x) == 0:\n raise ValueError(f\"Index spec must be a non-empty list or tuple. Got {x}.\")\n\n for v in x:\n if not isinstance(v, int):\n raise ValueError(f\"All elements of index spec must be ints. 
Got {x}\")\n\n return list(x)\n\n\n# Define alias for old code\nLoadBasicCont = LoadFilesFromParams\n\n\nclass FindFiles(pipeline.TaskBase):\n \"\"\"Take a glob or list of files specified as a parameter in the\n configuration file and pass on to other tasks.\n\n Parameters\n ----------\n files : list or glob\n \"\"\"\n\n files = config.Property(proptype=_list_or_glob)\n\n def setup(self):\n \"\"\"Return list of files specified in the parameters.\"\"\"\n if not isinstance(self.files, (list, tuple)):\n raise RuntimeError(\"Argument must be list of files.\")\n\n return self.files\n\n\nclass LoadFiles(LoadFilesFromParams):\n \"\"\"Load data from files passed into the setup routine.\n\n File must be a serialised subclass of :class:`memh5.BasicCont`.\n \"\"\"\n\n files = None\n\n def setup(self, files):\n \"\"\"Set the list of files to load.\n\n Parameters\n ----------\n files : list\n \"\"\"\n\n # Call the baseclass setup to resolve any selections\n super().setup()\n\n if not isinstance(files, (list, tuple)):\n raise RuntimeError(f'Argument must be list of files. Got \"{files}\"')\n\n self.files = files\n\n\nclass Save(pipeline.TaskBase):\n \"\"\"Save out the input, and pass it on.\n\n Assumes that the input has a `to_hdf5` method. Appends a *tag* if there is\n a `tag` entry in the attributes, otherwise just uses a count.\n\n Attributes\n ----------\n root : str\n Root of the file name to output to.\n \"\"\"\n\n root = config.Property(proptype=str)\n\n count = 0\n\n def next(self, data):\n \"\"\"Write out the data file.\n\n Assumes it has an MPIDataset interface.\n\n Parameters\n ----------\n data : mpidataset.MPIDataset\n Data to write out.\n \"\"\"\n\n if \"tag\" not in data.attrs:\n tag = self.count\n self.count += 1\n else:\n tag = data.attrs[\"tag\"]\n\n fname = \"%s_%s.h5\" % (self.root, str(tag))\n\n data.to_hdf5(fname)\n\n return data\n\n\nclass Print(pipeline.TaskBase):\n \"\"\"Stupid module which just prints whatever it gets. Good for debugging.\"\"\"\n\n def next(self, input_):\n\n print(input_)\n\n return input_\n\n\nclass LoadBeamTransfer(pipeline.TaskBase):\n \"\"\"Loads a beam transfer manager from disk.\n\n Attributes\n ----------\n product_directory : str\n Path to the saved Beam Transfer products.\n \"\"\"\n\n product_directory = config.Property(proptype=str)\n\n def setup(self):\n \"\"\"Load the beam transfer matrices.\n\n Returns\n -------\n tel : TransitTelescope\n Object describing the telescope.\n bt : BeamTransfer\n BeamTransfer manager.\n feed_info : list, optional\n Optional list providing additional information about each feed.\n \"\"\"\n\n import os\n\n from drift.core import beamtransfer\n\n if not os.path.exists(self.product_directory):\n raise RuntimeError(\"BeamTransfers do not exist.\")\n\n bt = beamtransfer.BeamTransfer(self.product_directory)\n\n tel = bt.telescope\n\n try:\n return tel, bt, tel.feeds\n except AttributeError:\n return tel, bt\n\n\nclass LoadProductManager(pipeline.TaskBase):\n \"\"\"Loads a driftscan product manager from disk.\n\n Attributes\n ----------\n product_directory : str\n Path to the root of the products. 
This is the same as the output\n directory used by ``drift-makeproducts``.\n \"\"\"\n\n product_directory = config.Property(proptype=str)\n\n def setup(self):\n \"\"\"Load the beam transfer matrices.\n\n Returns\n -------\n manager : ProductManager\n Object describing the telescope.\n \"\"\"\n\n import os\n\n from drift.core import manager\n\n if not os.path.exists(self.product_directory):\n raise RuntimeError(\"Products do not exist.\")\n\n # Load ProductManager and Timestream\n pm = manager.ProductManager.from_config(self.product_directory)\n\n return pm\n\n\nclass Truncate(task.SingleTask):\n \"\"\"Precision truncate data prior to saving with bitshuffle compression.\n\n If no configuration is provided, will look for preset values for the\n input container. Any properties defined in the config will override the\n presets.\n\n If available, each specified dataset will be truncated relative to a\n (specified) weight dataset with the truncation increasing the variance up\n to the specified maximum in `variance_increase`. If there is no specified\n weight dataset then the truncation falls back to using the\n `fixed_precision`.\n\n Attributes\n ----------\n dataset : list of str\n Datasets to truncate.\n weight_dataset : list of str\n Datasets to use as inverse variance for truncation precision.\n fixed_precision : float\n Relative precision to truncate to (default 1e-4).\n variance_increase : float\n Maximum fractional increase in variance from numerical truncation.\n \"\"\"\n\n dataset = config.Property(proptype=list, default=None)\n weight_dataset = config.Property(proptype=list, default=None)\n fixed_precision = config.Property(proptype=float, default=None)\n variance_increase = config.Property(proptype=float, default=None)\n\n def _get_params(self, container):\n \"\"\"Load truncation parameters from config or container defaults.\"\"\"\n if container in TRUNC_SPEC:\n self.log.info(\"Truncating from preset for container {}\".format(container))\n for key in [\n \"dataset\",\n \"weight_dataset\",\n \"fixed_precision\",\n \"variance_increase\",\n ]:\n attr = getattr(self, key)\n if attr is None:\n setattr(self, key, TRUNC_SPEC[container][key])\n else:\n self.log.info(\"Overriding container default for '{}'.\".format(key))\n else:\n if (\n self.dataset is None\n or self.fixed_precision is None\n or self.variance_increase is None\n ):\n raise pipeline.PipelineConfigError(\n \"Container {} has no preset values. 
You must define all of 'dataset', \"\n \"'fixed_precision', and 'variance_increase' properties.\".format(\n container\n )\n )\n # Factor of 3 for variance over uniform distribution of truncation errors\n self.variance_increase *= 3\n\n def process(self, data):\n \"\"\"Truncate the incoming data.\n\n The truncation is done *in place*.\n\n Parameters\n ----------\n data : containers.ContainerBase\n Data to truncate.\n\n Returns\n -------\n truncated_data : containers.ContainerBase\n Truncated data.\n \"\"\"\n # get truncation parameters from config or container defaults\n self._get_params(type(data))\n\n if self.weight_dataset is None:\n self.weight_dataset = [None] * len(self.dataset)\n\n for dset, wgt in zip(self.dataset, self.weight_dataset):\n old_shape = data[dset].local_shape\n val = np.ndarray.reshape(data[dset][:], data[dset][:].size)\n if wgt is None:\n if np.iscomplexobj(data[dset]):\n data[dset][:].real = bit_truncate_fixed(\n val.real, self.fixed_precision\n ).reshape(old_shape)\n data[dset][:].imag = bit_truncate_fixed(\n val.imag, self.fixed_precision\n ).reshape(old_shape)\n else:\n data[dset][:] = bit_truncate_fixed(\n val, self.fixed_precision\n ).reshape(old_shape)\n else:\n if data[dset][:].shape != data[wgt][:].shape:\n raise pipeline.PipelineRuntimeError(\n \"Dataset and weight arrays must have same shape ({} != {})\".format(\n data[dset].shape, data[wgt].shape\n )\n )\n invvar = np.ndarray.reshape(data[wgt][:], data[dset][:].size)\n if np.iscomplexobj(data[dset]):\n data[dset][:].real = bit_truncate_weights(\n val.real,\n invvar * 2.0 / self.variance_increase,\n self.fixed_precision,\n ).reshape(old_shape)\n data[dset][:].imag = bit_truncate_weights(\n val.imag,\n invvar * 2.0 / self.variance_increase,\n self.fixed_precision,\n ).reshape(old_shape)\n else:\n data[dset][:] = bit_truncate_weights(\n val, invvar / self.variance_increase, self.fixed_precision\n ).reshape(old_shape)\n\n return data\n\n\nclass SaveModuleVersions(task.SingleTask):\n \"\"\"Write module versions to a YAML file.\n\n The list of modules should be added to the configuration under key 'save_versions'.\n The version strings are written to a YAML file.\n\n Attributes\n ----------\n root : str\n Root of the file name to output to.\n \"\"\"\n\n root = config.Property(proptype=str)\n\n done = True\n\n def setup(self):\n \"\"\"Save module versions.\"\"\"\n\n fname = \"{}_versions.yml\".format(self.root)\n f = open(fname, \"w\")\n f.write(yamldump(self.versions))\n f.close()\n self.done = True\n\n def process(self):\n \"\"\"Do nothing.\"\"\"\n self.done = True\n return\n\n\nclass SaveConfig(task.SingleTask):\n \"\"\"Write pipeline config to a text file.\n\n Yaml configuration document is written to a text file.\n\n Attributes\n ----------\n root : str\n Root of the file name to output to.\n \"\"\"\n\n root = config.Property(proptype=str)\n done = True\n\n def setup(self):\n \"\"\"Save module versions.\"\"\"\n\n fname = \"{}_config.yml\".format(self.root)\n f = open(fname, \"w\")\n f.write(yamldump(self.pipeline_config))\n f.close()\n self.done = True\n\n def process(self):\n \"\"\"Do nothing.\"\"\"\n self.done = True\n return\n\n\ndef get_telescope(obj):\n \"\"\"Return a telescope object out of the input (either `ProductManager`,\n `BeamTransfer` or `TransitTelescope`).\n \"\"\"\n from drift.core import telescope\n\n try:\n return get_beamtransfer(obj).telescope\n except RuntimeError:\n if isinstance(obj, telescope.TransitTelescope):\n return obj\n\n raise RuntimeError(\"Could not get telescope instance 
out of %s\" % repr(obj))\n\n\ndef get_beamtransfer(obj):\n \"\"\"Return a BeamTransfer object out of the input (either `ProductManager`,\n `BeamTransfer`).\n \"\"\"\n from drift.core import manager, beamtransfer\n\n if isinstance(obj, beamtransfer.BeamTransfer):\n return obj\n\n if isinstance(obj, manager.ProductManager):\n return obj.beamtransfer\n\n raise RuntimeError(\"Could not get BeamTransfer instance out of %s\" % repr(obj))\n" ]
[ [ "numpy.ascontiguousarray", "numpy.ndarray.reshape", "numpy.concatenate", "numpy.iscomplexobj", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adam-blinzler/simple-lane-detection
[ "8814e0aaf7ac56b7e5be59634e363ca17839effb" ]
[ "original_author_notes/yolo_video.py" ]
[ "# USAGE\n# python yolo_video.py --input videos/airport.mp4 --output output/airport_output.avi --object_detection object_detection-coco\n\n# import the necessary packages\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport os\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", required=True,\n\thelp=\"path to input video\")\nap.add_argument(\"-o\", \"--output\", required=True,\n\thelp=\"path to output video\")\nap.add_argument(\"-y\", \"--object_detection\", required=True,\n\thelp=\"base path to YOLO directory\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\thelp=\"minimum probability to filter weak detections\")\nap.add_argument(\"-t\", \"--threshold\", type=float, default=0.3,\n\thelp=\"threshold when applyong non-maxima suppression\")\nargs = vars(ap.parse_args())\n\n# load the COCO class labels our YOLO model was trained on\nlabelsPath = os.path.sep.join([args[\"object_detection\"], \"coco.names\"])\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n# initialize a list of colors to represent each possible class label\nnp.random.seed(42)\nCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3),\n\tdtype=\"uint8\")\n\n# derive the paths to the YOLO weights and model configuration\nweightsPath = os.path.sep.join([args[\"object_detection\"], \"yolov3.weights\"])\nconfigPath = os.path.sep.join([args[\"object_detection\"], \"yolov3.cfg\"])\n\n# load our YOLO object detector trained on COCO dataset (80 classes)\n# and determine only the *output* layer names that we need from YOLO\nprint(\"[INFO] loading YOLO from disk...\")\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n# initialize the video stream, pointer to output video file, and\n# frame dimensions\nvs = cv2.VideoCapture(args[\"input\"])\nwriter = None\n(W, H) = (None, None)\n\n# try to determine the total number of frames in the video file\ntry:\n\tprop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \\\n\t\telse cv2.CAP_PROP_FRAME_COUNT\n\ttotal = int(vs.get(prop))\n\tprint(\"[INFO] {} total frames in video\".format(total))\n\n# an error occurred while trying to determine the total\n# number of frames in the video file\nexcept:\n\tprint(\"[INFO] could not determine # of frames in video\")\n\tprint(\"[INFO] no approx. 
completion time can be provided\")\n\ttotal = -1\n\n# loop over frames from the video file stream\nwhile True:\n\t# read the next frame from the file\n\t(grabbed, frame) = vs.read()\n\n\t# if the frame was not grabbed, then we have reached the end\n\t# of the stream\n\tif not grabbed:\n\t\tbreak\n\n\t# if the frame dimensions are empty, grab them\n\tif W is None or H is None:\n\t\t(H, W) = frame.shape[:2]\n\n\t# construct a blob from the input frame and then perform a forward\n\t# pass of the YOLO object detector, giving us our bounding boxes\n\t# and associated probabilities\n\tblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),\n\t\tswapRB=True, crop=False)\n\tnet.setInput(blob)\n\tstart = time.time()\n\tlayerOutputs = net.forward(ln)\n\tend = time.time()\n\n\t# initialize our lists of detected bounding boxes, confidences,\n\t# and class IDs, respectively\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\n\t# loop over each of the layer outputs\n\tfor output in layerOutputs:\n\t\t# loop over each of the detections\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability)\n\t\t\t# of the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > args[\"confidence\"]:\n\t\t\t\t# scale the bounding box coordinates back relative to\n\t\t\t\t# the size of the image, keeping in mind that YOLO\n\t\t\t\t# actually returns the center (x, y)-coordinates of\n\t\t\t\t# the bounding box followed by the boxes' width and\n\t\t\t\t# height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top\n\t\t\t\t# and and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates,\n\t\t\t\t# confidences, and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping\n\t# bounding boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, args[\"confidence\"],\n\t\targs[\"threshold\"])\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\t# draw a bounding box rectangle and label on the frame\n\t\t\tcolor = [int(c) for c in COLORS[classIDs[i]]]\n\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n\t\t\ttext = \"{}: {:.4f}\".format(LABELS[classIDs[i]],\n\t\t\t\tconfidences[i])\n\t\t\tcv2.putText(frame, text, (x, y - 5),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\n\t# check if the video writer is None\n\tif writer is None:\n\t\t# initialize our video writer\n\t\tfourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\t\twriter = cv2.VideoWriter(args[\"output\"], fourcc, 30,\n\t\t\t(frame.shape[1], frame.shape[0]), True)\n\n\t\t# some information on processing single frame\n\t\tif total > 0:\n\t\t\telap = (end - start)\n\t\t\tprint(\"[INFO] single frame took {:.4f} seconds\".format(elap))\n\t\t\tprint(\"[INFO] estimated total time to finish: 
{:.4f}\".format(\n\t\t\t\telap * total))\n\n\t# write the output frame to disk\n\twriter.write(frame)\n\n# release the file pointers\nprint(\"[INFO] cleaning up...\")\nwriter.release()\nvs.release()" ]
[ [ "numpy.array", "numpy.argmax", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vikashplus/MPL
[ "4a784fd94dc7a5988a1eca85851ee546ca1992f9" ]
[ "MPL/MPL_envs/reach/reach_v0.py" ]
[ "import numpy as np\nfrom gym import utils\nfrom mjrl.envs import mujoco_env\nfrom mujoco_py import MjViewer\nfrom MPL.MPL_robot.robot import Robot\nimport os\n\n# TODO: Action normalization is missing\n\nclass sallyReachEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, noise_scale=0.0):\n\n # prep\n utils.EzPickle.__init__(self)\n self._noise_scale = noise_scale\n self.initializing = True\n curr_dir = os.path.dirname(os.path.abspath(__file__))\n self.Rtarget = 0\n self.Ltarget = 0\n self.Rgrasp = 0\n self.Lgrasp = 0\n \n # acquire robot\n self.mpl = Robot(name='sallyReach', model_file=curr_dir+'/reach_v0.xml', config_file=curr_dir+'/reach_v0.config')\n\n # acquire env\n mujoco_env.MujocoEnv.__init__(self, curr_dir+'/reach_v0.xml', 20)\n self.Rtarget = self.sim.model.site_name2id('Rtarget')\n self.Ltarget = self.sim.model.site_name2id('Ltarget')\n self.Rgrasp = self.sim.model.site_name2id('Rgrasp')\n self.Lgrasp = self.sim.model.site_name2id('Lgrasp')\n \n # env ready\n self.initializing = False\n\n\n def step(self, a):\n\n self.mpl.step(self, a, self.frame_skip*self.sim.model.opt.timestep)\n obs = self.get_obs()\n\n score, reward_dict, solved, done = self._get_score_reward_solved_done(self.obs_dict)\n\n # finalize step\n env_info = {\n 'time': self.obs_dict['t'],\n 'obs_dict': self.obs_dict,\n 'rewards': reward_dict,\n 'score': score,\n 'solved': solved\n }\n return obs, reward_dict['total'], done, env_info\n\n\n # query robot and populate observations\n def get_obs(self):\n\n # ask robot for sensor data\n sen = self.mpl.get_sensors(self, noise_scale=self._noise_scale)\n\n # parse sensor data into obs dict\n self.obs_dict = {}\n self.obs_dict['t'] = sen['time']\n self.obs_dict['Tmpl_pos'] = sen['Tmpl_pos']\n self.obs_dict['Rmpl_pos'] = sen['Rmpl_pos']\n self.obs_dict['Lmpl_pos'] = sen['Lmpl_pos']\n self.obs_dict['Tmpl_vel'] = sen['Tmpl_vel']\n self.obs_dict['Rmpl_vel'] = sen['Rmpl_vel']\n self.obs_dict['Lmpl_vel'] = sen['Lmpl_vel']\n self.obs_dict['Rerr'] = self.sim.data.site_xpos[self.Rtarget]-self.sim.data.site_xpos[self.Rgrasp]\n self.obs_dict['Lerr'] = self.sim.data.site_xpos[self.Ltarget]-self.sim.data.site_xpos[self.Lgrasp]\n\n # vectorize observations\n return np.concatenate([\n self.obs_dict['Tmpl_pos'],\n self.obs_dict['Rmpl_pos'],\n self.obs_dict['Lmpl_pos'],\n self.obs_dict['Tmpl_vel'],\n self.obs_dict['Rmpl_vel'],\n self.obs_dict['Lmpl_vel'],\n self.obs_dict['Lerr'],\n self.obs_dict['Rerr']])\n\n\n # evaluate observations\n def _get_score_reward_solved_done(self, obs, act=None):\n Ldist = np.linalg.norm(obs['Lerr'])\n Rdist = np.linalg.norm(obs['Rerr'])\n\n # print(Rdist, Ldist)\n done = (bool( Ldist > 1.0) or bool(Rdist>1.0)) \\\n if not self.initializing else False\n\n reward_dict = {}\n avg_dist = (Ldist+Rdist)/2.0\n score = -1.* avg_dist\n reward_dict[\"avg_dist\"] = score\n reward_dict[\"small_bonus\"] = 2.0*(Ldist<.1) + 2.0*(Rdist<.1)\n reward_dict[\"big_bonus\"] = 2.0*(Ldist<.1) * 2.0*(Rdist<.1)\n reward_dict[\"total\"] = reward_dict[\"avg_dist\"] + reward_dict[\"small_bonus\"] + reward_dict[\"big_bonus\"] - 50.0 * int(done) \n \n solved = bool(avg_dist<0.100)\n return score, reward_dict, solved, done\n\n\n # reset model\n def reset_model(self):\n raise NotImplementedError # for child class to define \n\n\n # evaluate a path\n def compute_path_rewards(self, paths):\n # path has two keys: observations and actions\n # path[\"observations\"] : (num_traj, horizon, obs_dim)\n # path[\"rewards\"] should have shape (num_traj, horizon)\n obs = 
paths[\"observations\"]\n score, rewards, done = self._get_score_reward_solved_done(obs)\n paths[\"rewards\"] = rewards if rewards.shape[0] > 1 else rewards.ravel()\n\n\n # evaluate policy's success from a collection of paths\n def evaluate_success(self, paths, logger=None):\n success = 0.0\n for p in paths:\n if np.mean(p['env_infos']['solved'][-4:]) > 0.0:\n success += 1.0\n success_rate = 100.0*success/len(paths)\n if logger is None:\n # nowhere to log so return the value\n return success_rate\n else:\n # log the success\n # can log multiple statistics here if needed\n logger.log_kv('success_rate', success_rate)\n return None\n\n # --------------------------------\n # get and set states\n # --------------------------------\n def get_env_state(self):\n return dict(qp=self.data.qpos.copy(), qv=self.data.qvel.copy())\n\n def set_env_state(self, state):\n self.sim.reset()\n qp = state['qp'].copy()\n qv = state['qv'].copy()\n self.set_state(qp, qv)\n self.sim.forward()\n\n # --------------------------------\n # utility functions\n # --------------------------------\n def get_env_infos(self):\n return dict(state=self.get_env_state())\n\n def mj_viewer_setup(self):\n self.viewer = MjViewer(self.sim)\n self.viewer.cam.azimuth = -90\n self.viewer.cam.distance = 2.5\n self.viewer.cam.elevation = -30\n\n self.sim.forward()\n\n def close_env(self):\n pass\n\n\n# Reach at fixed targets\nclass sallyReachEnvFixed(sallyReachEnv):\n def __init__(self):\n super().__init__()\n\n def reset_model(self):\n self.sim.model.site_pos[self.Rtarget] = np.array([0.15, 0.2, .6])\n self.sim.model.site_pos[self.Ltarget] = np.array([-0.15, 0.2, .3])\n self.set_state(self.init_qpos, self.init_qvel)\n self.sim.forward()\n return self.get_obs()\n\n# Reach at random targets\nclass sallyReachEnvRandom(sallyReachEnv):\n def __init__(self):\n super().__init__()\n\n def reset_model(self):\n self.sim.model.site_pos[self.Rtarget] = self.np_random.uniform(high=[0.5, .5, .6], low=[0, .1, .3])\n self.sim.model.site_pos[self.Ltarget] = self.np_random.uniform(high=[0, .5, .6], low=[-.5, .1, .3])\n self.set_state(self.init_qpos, self.init_qvel)\n self.sim.forward()\n return self.get_obs()\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.mean", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
ivankreso/semseg
[ "fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79", "fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79" ]
[ "OLD/losses.py", "OLD/models/resnet/old/resnet_orig.py" ]
[ "import tensorflow as tf\nimport slim\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef add_loss_summaries(total_loss):\n \"\"\"Add summaries for losses in model.\n\n Generates moving average for all losses and associated summaries for\n visualizing the performance of the network.\n\n Args:\n total_loss: Total loss from loss().\n Returns:\n loss_averages_op: op for generating moving averages of losses.\n \"\"\"\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n\n for l in losses + [total_loss]:\n #print(l.op.name)\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n #tf.scalar_summary(l.op.name + ' (raw)', l)\n #tf.scalar_summary(l.op.name, loss_averages.average(l))\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n #tf.scalar_summary([l.op.name + ' (raw)'], l)\n #tf.scalar_summary([l.op.name], loss_averages.average(l))\n\n return loss_averages_op\n\n\ndef total_loss_sum(losses):\n # Assemble all of the losses for the current tower only.\n #losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)\n #print(losses)\n # Calculate the total loss for the current tower.\n #regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n #regularization_losses = tf.contrib.losses.get_regularization_losses()\n regularization_losses = tf.losses.get_regularization_losses()\n #total_loss = tf.add_n(losses + regularization_losses, name='total_loss')\n total_loss = tf.add_n(losses + regularization_losses, name='total_loss')\n return total_loss\n\n\ndef cross_entropy_loss(logits, labels):\n print('loss: cross-entropy')\n num_pixels = -1\n with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):\n labels = tf.reshape(labels, shape=[num_pixels])\n logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])\n mask = labels < FLAGS.num_classes\n idx = tf.where(mask)\n # # labels = tf.reshape(labels, shape=[num_pixels])\n # print(idx)\n labels = tf.to_float(labels)\n labels = tf.gather_nd(labels, idx)\n # labels = tf.boolean_mask(labels, mask)\n labels = tf.to_int32(labels)\n logits = tf.gather_nd(logits, idx)\n # logits = tf.boolean_mask(logits, mask)\n\n \n onehot_labels = tf.one_hot(labels, FLAGS.num_classes)\n xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)\n \n # range_idx = tf.range(tf.shape(labels)[0], dtype=tf.int32)\n # print(range_idx, labels)\n # labels = tf.reshape(labels, shape=[-1,1])\n # range_idx = tf.reshape(range_idx, shape=[-1,1])\n # idx = tf.concat([range_idx, labels], axis=1)\n # print(idx)\n # probs = tf.nn.softmax(logits)\n # probs = tf.gather_nd(probs, idx)\n # print(probs)\n # xent = tf.square(1 - probs) * xent\n # # xent = tf.pow(1 - probs, 3) * xent\n # # xent = (1 - probs) * xent\n\n #num_labels = tf.to_float(tf.reduce_sum(num_labels))\n #num_labels = tf.reduce_sum(tf.to_float(num_labels))\n\n #class_hist = tf.Print(class_hist, [class_hist], 'hist = ', summarize=30)\n #num_labels = tf.reduce_sum(onehot_labels)\n\n #class_hist = tf.to_float(tf.reduce_sum(class_hist, axis=0))\n ##num_labels = tf.Print(num_labels, [num_labels, tf.reduce_sum(onehot_labels)], 'lab = ', summarize=30)\n #class_weights = 
num_labels / (class_hist + 1)\n ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n ## we need to append 0 here for ignore pixels\n #class_weights = tf.concat([class_weights, [0]], axis=0)\n ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n #class_weights = tf.minimum(tf.to_float(max_weight), class_weights)\n\n # class_weights = tf.ones([FLAGS.num_classes])\n # class_weights = tf.concat([class_weights, [0]], axis=0)\n # #class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n # weights = tf.gather(class_weights, labels)\n\n xent = tf.reduce_mean(xent)\n return xent\n\ndef weighted_cross_entropy_loss(logits, labels, class_hist=None, max_weight=1):\n print('loss: cross-entropy')\n print('Using balanced loss with max weight = ', max_weight)\n num_pixels = -1\n with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):\n labels = tf.reshape(labels, shape=[num_pixels])\n onehot_labels = tf.one_hot(labels, FLAGS.num_classes)\n logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])\n xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)\n\n #num_labels = tf.to_float(tf.reduce_sum(num_labels))\n #num_labels = tf.reduce_sum(tf.to_float(num_labels))\n\n #class_hist = tf.Print(class_hist, [class_hist], 'hist = ', summarize=30)\n num_labels = tf.reduce_sum(onehot_labels)\n\n #class_hist = tf.to_float(tf.reduce_sum(class_hist, axis=0))\n ##num_labels = tf.Print(num_labels, [num_labels, tf.reduce_sum(onehot_labels)], 'lab = ', summarize=30)\n #class_weights = num_labels / (class_hist + 1)\n ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n ## we need to append 0 here for ignore pixels\n #class_weights = tf.concat([class_weights, [0]], axis=0)\n ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n #class_weights = tf.minimum(tf.to_float(max_weight), class_weights)\n\n class_weights = tf.ones([FLAGS.num_classes])\n class_weights = tf.concat([class_weights, [0]], axis=0)\n #class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n weights = tf.gather(class_weights, labels)\n\n if max_weight > 1:\n raise ValueError()\n wgt_sum = tf.reduce_sum(weights)\n norm_factor = num_labels / wgt_sum\n # weights need to sum to 1\n weights = tf.multiply(weights, norm_factor)\n\n xent = tf.multiply(weights, xent)\n #num_labels = tf.Print(num_labels, [num_labels, wgt_sum], 'num_labels = ')\n #xent = tf.Print(xent, [xent], 'num_labels = ')\n xent = tf.reduce_sum(xent) / num_labels\n return xent\n\n\ndef weighted_cross_entropy_loss_dense(logits, labels, weights=None,\n num_labels=None, max_weight=100):\n print('loss: cross-entropy')\n num_pixels = -1\n with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):\n labels = tf.reshape(labels, shape=[num_pixels])\n onehot_labels = tf.one_hot(labels, FLAGS.num_classes)\n logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])\n xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)\n\n if num_labels is None:\n num_labels = tf.reduce_sum(onehot_labels)\n else:\n num_labels = tf.reduce_sum(num_labels)\n\n print('Using balanced loss with max weight = ', max_weight)\n weights = tf.reshape(weights, shape=[num_pixels])\n weights = tf.minimum(tf.to_float(max_weight), weights)\n wgt_sum = tf.reduce_sum(weights)\n norm_factor = num_labels / wgt_sum\n # weights need to sum to 1\n weights = 
tf.multiply(weights, norm_factor)\n xent = tf.multiply(weights, xent)\n\n #num_labels = tf.Print(num_labels, [num_labels, wgt_sum], 'num_labels = ')\n #xent = tf.Print(xent, [xent], 'num_labels = ')\n xent = tf.reduce_sum(xent) / num_labels\n print(xent)\n return xent\n\n\ndef cross_entropy_loss_old(logits, labels, weights, num_labels):\n print('loss: cross-entropy')\n num_pixels = -1\n with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels, num_labels]):\n labels = tf.reshape(labels, shape=[num_pixels])\n onehot_labels = tf.one_hot(labels, FLAGS.num_classes)\n logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])\n xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)\n weights = tf.reshape(weights, shape=[num_pixels])\n xent = tf.multiply(weights, xent)\n xent = tf.reduce_sum(xent) / tf.reduce_sum(num_labels)\n print(xent)\n return xent\n\n\ndef mse(yp, yt):\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n with tf.name_scope('MeanSquareError'):\n yt = tf.reshape(yt, shape=[num_examples])\n yp = tf.reshape(yp, shape=[num_examples])\n return tf.reduce_mean(tf.square(yt - yp))\n\n\n\ndef weighted_cross_entropy_loss_deprecated(logits, labels, weights=None, max_weight=100):\n#def weighted_cross_entropy_loss(logits, labels, weights=None, max_weight=1e2):\n#def weighted_cross_entropy_loss(logits, labels, weights=None, max_weight=1e3):\n print('loss: Weighted Cross Entropy Loss')\n shape = labels.get_shape().as_list()\n print(shape)\n #num_examples = shape[0] * shape[1]\n num_examples = -1\n #num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n with tf.name_scope(None, 'WeightedCrossEntropyLoss', [logits, labels]):\n labels = tf.reshape(labels, shape=[num_examples])\n #num_labels = tf.to_float(tf.reduce_sum(num_labels))\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n # todo\n #log_softmax = tf.log(tf.nn.softmax(logits_1d)) - never do this!\n log_softmax = tf.nn.log_softmax(logits_1d)\n xent = tf.reduce_sum(-tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)\n #weighted_xent = tf.mul(weights, xent)\n if weights != None:\n weights = tf.reshape(weights, shape=[num_examples])\n xent = tf.mul(tf.minimum(tf.to_float(max_weight), weights), xent)\n #weighted_xent = xent\n\n total_loss = tf.div(tf.reduce_sum(xent), tf.to_float(num_labels), name='value')\n print(total_loss)\n return total_loss\n\n\ndef flip_xent_loss(logits, labels, weights, max_weight=10):\n print('Loss: Weighted Cross Entropy Loss')\n assert(FLAGS.batch_size == 2)\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n labels = tf.reshape(labels, shape=[num_examples])\n weights = tf.reshape(weights, shape=[num_examples])\n #num_labels = tf.to_float(tf.reduce_sum(num_labels))\n with tf.name_scope('FlipXentLoss', [logits, labels]):\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))\n #print(logits[].get_shape())\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n # TODO\n #log_softmax = tf.log(tf.nn.softmax(logits_1d))\n log_softmax = tf.nn.log_softmax(logits_1d)\n xent = tf.reduce_sum(tf.mul(tf.to_float(one_hot_labels), 
log_softmax), 1)\n #weighted_xent = tf.mul(weights, xent)\n weighted_xent = tf.mul(tf.minimum(tf.to_float(max_weight), weights), xent)\n #weighted_xent = xent\n\n total_loss = - tf.div(tf.reduce_sum(weighted_xent), num_labels, name='value')\n return total_loss\n\n\n\ndef slim_cross_entropy_loss(logits, labels, num_labels):\n print('Loss: Cross Entropy Loss')\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n xent_loss = slim.losses.cross_entropy_loss(logits_1d, one_hot_labels)\n return xent_loss\n\n\ndef softmax(logits):\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n with tf.op_scope([logits], None, 'Softmax'):\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n softmax_1d = tf.nn.softmax(logits_1d)\n softmax_2d = tf.reshape(softmax_1d, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_classes])\n return softmax_2d\n\n\n\n\ndef multiclass_hinge_loss(logits, labels, weights):\n print('loss: Hinge loss')\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n num_classes = FLAGS.num_classes\n with tf.op_scope([logits, labels], None, 'MulticlassHingeLoss'):\n #logits = tf.reshape(logits, [num_examples, num_classes])\n #labels = tf.reshape(labels, [num_examples])\n #weights = tf.reshape(weights, [num_examples])\n logits = tf.reshape(logits, [-1, num_classes])\n labels = tf.reshape(labels, [-1])\n weights = tf.reshape(weights, [-1])\n select_mask = tf.greater_equal(labels, 0)\n logits = tf.boolean_mask(logits, select_mask)\n labels = tf.boolean_mask(labels, select_mask)\n weights = tf.boolean_mask(weights, select_mask)\n num_examples = tf.reduce_sum(tf.to_int32(select_mask))\n #num_examples = tf.Print(num_examples, [num_examples, num_labels_old], 'num_examples = ')\n #print(labels)\n #print(logits)\n #print(weights)\n #print(select_mask)\n partitions = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0, dtype=tf.int32)\n #print(partitions)\n #one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n #one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n #partitions = tf.to_int32(one_hot_labels)\n\n num_partitions = 2\n scores, score_yt = tf.dynamic_partition(logits, partitions, num_partitions)\n #scores = tf.reshape(scores, [num_examples, num_classes - 1])\n #score_yt = tf.reshape(score_yt, [num_examples, 1])\n scores = tf.reshape(scores, [-1, num_classes - 1])\n score_yt = tf.reshape(score_yt, [-1, 1])\n #print(scores)\n #print(score_yt)\n\n #hinge_loss = tf.maximum(0.0, scores - score_yt + margin)\n hinge_loss = tf.square(tf.maximum(0.0, scores - score_yt + 1.0))\n hinge_loss = tf.reduce_sum(hinge_loss, 1)\n\n #total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))\n #total_loss = tf.div(total_loss, tf.to_float(num_examples), name='value')\n total_loss = tf.reduce_mean(tf.mul(tf.minimum(100.0, weights), hinge_loss))\n\n #tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)\n #tf.nn.l2_loss(t, name=None)\n return total_loss\n\n\ndef metric_hinge_loss(logits, labels, weights, num_labels):\n print('loss: Hinge loss')\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n with tf.op_scope([logits, labels], None, 'weightedhingeloss'):\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n #codes = 
tf.nn.softmax(logits_1d)\n codes = tf.nn.l2_normalize(logits_1d, 1)\n # works worse\n # l2 loss -> bad!\n # todo - this is not true svm loss, try it from cs231n\n l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))\n m = 0.2\n #l2_dist = tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1)\n #m = 0.2 ** 2\n #m = 0.1 ** 2\n #m = 0.3 ** 2\n for i in range(num_classes):\n for j in range(num_classes):\n raise valueerror(1)\n hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)\n total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))\n\n total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')\n tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)\n\n #tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)\n #tf.nn.l2_loss(t, name=None)\n return total_loss\n\n#def weighted_hinge_loss(logits, labels, weights, num_labels):\n# print('Loss: Hinge Loss')\n# num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n# with tf.op_scope([logits, labels], None, 'WeightedHingeLoss'):\n# weights = tf.reshape(weights, shape=[num_examples])\n# labels = tf.reshape(labels, shape=[num_examples])\n# num_labels = tf.to_float(tf.reduce_sum(num_labels))\n# one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n# one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n# logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n# #codes = tf.nn.softmax(logits_1d)\n# codes = tf.nn.l2_normalize(logits_1d, 1)\n# # works worse\n# #l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))\n# #m = 0.2\n# l2_dist = tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1)\n# m = 0.2 ** 2\n# #m = 0.1 ** 2\n# #m = 0.3 ** 2\n# hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)\n# total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))\n#\n# total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')\n# tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)\n#\n# #tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)\n# #tf.nn.l2_loss(t, name=None)\n# return total_loss\n\ndef flip_xent_loss_symmetric(logits, labels, weights, num_labels):\n print('Loss: Weighted Cross Entropy Loss')\n num_examples = FLAGS.img_height * FLAGS.img_width\n with tf.op_scope([logits, labels], None, 'WeightedCrossEntropyLoss'):\n labels = tf.reshape(labels, shape=[2, num_examples])\n weights = tf.reshape(weights, shape=[2, num_examples])\n num_labels = tf.to_float(tf.reduce_sum(num_labels))\n #num_labels = tf.to_float(num_labels[0])\n logits_flip = logits[1,:,:,:]\n #weights_flip = weights[1,:]\n\n logits = logits[0,:,:,:]\n weights = weights[0,:]\n labels = labels[0,:]\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n\n #logits_orig, logits_flip = tf.split(0, 2, logits)\n logits_flip = tf.image.flip_left_right(logits_flip)\n #print(logits[].get_shape())\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n logits_1d_flip = tf.reshape(logits_flip, [num_examples, FLAGS.num_classes])\n # TODO\n log_softmax = tf.nn.log_softmax(logits_1d)\n\n #log_softmax_flip = tf.nn.log_softmax(logits_1d_flip)\n softmax_flip = tf.nn.softmax(logits_1d_flip)\n xent = tf.reduce_sum(tf.mul(tf.to_float(one_hot_labels), log_softmax), 1)\n weighted_xent = tf.mul(tf.minimum(tf.to_float(100), weights), xent)\n xent_flip = tf.reduce_sum(tf.mul(softmax_flip, log_softmax), 
1)\n xent_flip = tf.mul(tf.minimum(tf.to_float(100), weights), xent_flip)\n #weighted_xent = tf.mul(weights, xent)\n #weighted_xent = xent\n\n #total_loss = tf.div(- tf.reduce_sum(weighted_xent_flip),\n # num_labels, name='value')\n total_loss = - tf.div(tf.reduce_sum(weighted_xent) + tf.reduce_sum(xent_flip),\n num_labels, name='value')\n\n tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)\n return total_loss\n\n", "import tensorflow as tf\nimport argparse\nimport os, re\nimport numpy as np\nfrom tensorflow.contrib.layers import variance_scaling_initializer\n\nfrom tensorpack import *\nfrom tensorpack.utils import logger\nfrom tensorpack.utils.stat import RatioCounter\nfrom tensorpack.tfutils.symbolic_functions import *\nfrom tensorpack.tfutils.summary import *\nfrom tensorpack.dataflow.dataset import ILSVRCMeta\n\nMODEL_DEPTH = None\n\nclass Model(ModelDesc):\n def _get_input_vars(self):\n return [InputVar(tf.float32, [None, 224, 224, 3], 'input'),\n InputVar(tf.int32, [None], 'label')]\n\n def _build_graph(self, input_vars):\n image, label = input_vars\n\n def shortcut(l, n_in, n_out, stride):\n if n_in != n_out:\n l = Conv2D('convshortcut', l, n_out, 1, stride=stride)\n return BatchNorm('bnshortcut', l)\n else:\n return l\n\n def bottleneck(l, ch_out, stride, preact):\n ch_in = l.get_shape().as_list()[-1]\n input = l\n if preact == 'both_preact':\n l = tf.nn.relu(l, name='preact-relu')\n input = l\n l = Conv2D('conv1', l, ch_out, 1, stride=stride)\n l = BatchNorm('bn1', l)\n l = tf.nn.relu(l)\n l = Conv2D('conv2', l, ch_out, 3)\n l = BatchNorm('bn2', l)\n l = tf.nn.relu(l)\n l = Conv2D('conv3', l, ch_out * 4, 1)\n l = BatchNorm('bn3', l) # put bn at the bottom\n return l + shortcut(input, ch_in, ch_out * 4, stride)\n\n def layer(l, layername, features, count, stride, first=False):\n with tf.variable_scope(layername):\n with tf.variable_scope('block0'):\n l = bottleneck(l, features, stride,\n 'no_preact' if first else 'both_preact')\n for i in range(1, count):\n with tf.variable_scope('block{}'.format(i)):\n l = bottleneck(l, features, 1, 'both_preact')\n return l\n\n cfg = {\n 50: ([3,4,6,3]),\n 101: ([3,4,23,3]),\n 152: ([3,8,36,3])\n }\n defs = cfg[MODEL_DEPTH]\n\n with argscope(Conv2D, nl=tf.identity, use_bias=False,\n W_init=variance_scaling_initializer(mode='FAN_OUT')):\n # tensorflow with padding=SAME will by default pad [2,3] here.\n # but caffe conv with stride will pad [3,3]\n image = tf.pad(image, [[0,0],[3,3],[3,3],[0,0]])\n fc1000 = (LinearWrap(image)\n .Conv2D('conv0', 64, 7, stride=2, nl=BNReLU, padding='VALID')\n .MaxPooling('pool0', shape=3, stride=2, padding='SAME')\n .apply(layer, 'group0', 64, defs[0], 1, first=True)\n .apply(layer, 'group1', 128, defs[1], 2)\n .apply(layer, 'group2', 256, defs[2], 2)\n .apply(layer, 'group3', 512, defs[3], 2)\n .tf.nn.relu()\n .GlobalAvgPooling('gap')\n .FullyConnected('fc1000', 1000, nl=tf.identity)())\n prob = tf.nn.softmax(fc1000, name='prob')\n nr_wrong = prediction_incorrect(fc1000, label, name='wrong-top1')\n nr_wrong = prediction_incorrect(fc1000, label, 5, name='wrong-top5')\n\ndef get_inference_augmentor():\n # load ResNet mean from Kaiming:\n #from tensorpack.utils.loadcaffe import get_caffe_pb\n #obj = get_caffe_pb().BlobProto()\n #obj.ParseFromString(open('ResNet_mean.binaryproto').read())\n #pp_mean_224 = np.array(obj.data).reshape(3, 224, 224).transpose(1,2,0)\n\n meta = ILSVRCMeta()\n pp_mean = meta.get_per_pixel_mean()\n pp_mean_224 = pp_mean[16:-16,16:-16,:]\n\n transformers = imgaug.AugmentorList([\n 
imgaug.ResizeShortestEdge(256),\n imgaug.CenterCrop((224, 224)),\n imgaug.MapImage(lambda x: x - pp_mean_224),\n ])\n return transformers\n\n\ndef init_params(params, data_dir):\n ds = dataset.ILSVRC12(data_dir, 'val', shuffle=False, dir_structure='train')\n ds = AugmentImageComponent(ds, get_inference_augmentor())\n ds = BatchData(ds, 128, remainder=True)\n pred_config = PredictConfig(\n model=Model(),\n session_init=ParamRestore(params),\n input_names=['input', 'label'],\n output_names=['wrong-top1', 'wrong-top5']\n )\n pred = SimpleDatasetPredictor(pred_config, ds)\n acc1, acc5 = RatioCounter(), RatioCounter()\n for o in pred.get_result():\n batch_size = o[0].shape[0]\n acc1.feed(o[0].sum(), batch_size)\n acc5.feed(o[1].sum(), batch_size)\n print(\"Top1 Error: {}\".format(acc1.ratio))\n print(\"Top5 Error: {}\".format(acc5.ratio))\n\ndef name_conversion(caffe_layer_name):\n \"\"\" Convert a caffe parameter name to a tensorflow parameter name as\n defined in the above model \"\"\"\n # beginning & end mapping\n NAME_MAP = {'bn_conv1/beta': 'conv0/bn/beta',\n 'bn_conv1/gamma': 'conv0/bn/gamma',\n 'bn_conv1/mean/EMA': 'conv0/bn/mean/EMA',\n 'bn_conv1/variance/EMA': 'conv0/bn/variance/EMA',\n 'conv1/W': 'conv0/W', 'conv1/b': 'conv0/b',\n 'fc1000/W': 'fc1000/W', 'fc1000/b': 'fc1000/b'}\n if caffe_layer_name in NAME_MAP:\n return NAME_MAP[caffe_layer_name]\n\n s = re.search('([a-z]+)([0-9]+)([a-z]+)_', caffe_layer_name)\n if s is None:\n s = re.search('([a-z]+)([0-9]+)([a-z]+)([0-9]+)_', caffe_layer_name)\n layer_block_part1 = s.group(3)\n layer_block_part2 = s.group(4)\n assert layer_block_part1 in ['a', 'b']\n layer_block = 0 if layer_block_part1 == 'a' else int(layer_block_part2)\n else:\n layer_block = ord(s.group(3)) - ord('a')\n layer_type = s.group(1)\n layer_group = s.group(2)\n\n layer_branch = int(re.search('_branch([0-9])', caffe_layer_name).group(1))\n assert layer_branch in [1, 2]\n if layer_branch == 2:\n layer_id = re.search('_branch[0-9]([a-z])/', caffe_layer_name).group(1)\n layer_id = ord(layer_id) - ord('a') + 1\n\n TYPE_DICT = {'res':'conv', 'bn':'bn'}\n\n tf_name = caffe_layer_name[caffe_layer_name.index('/'):]\n layer_type = TYPE_DICT[layer_type] + \\\n (str(layer_id) if layer_branch == 2 else 'shortcut')\n tf_name = 'group{}/block{}/{}'.format(\n int(layer_group) - 2, layer_block, layer_type) + tf_name\n return tf_name\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') # nargs='*' in multi mode\n parser.add_argument('--load', required=True,\n help='.npy model file generated by tensorpack.utils.loadcaffe')\n parser.add_argument('-d', '--depth', help='resnet depth', required=True, type=int, choices=[50, 101, 152])\n parser.add_argument('--input', help='an input image')\n parser.add_argument('--eval', help='ILSVRC dir to run validation on')\n\n args = parser.parse_args()\n assert args.input or args.eval, \"Choose either input or eval!\"\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n MODEL_DEPTH = args.depth\n\n param = np.load(args.load, encoding='latin1').item()\n resnet_param = {}\n for k, v in six.iteritems(param):\n try:\n newname = name_conversion(k)\n except:\n logger.error(\"Exception when processing caffe layer {}\".format(k))\n raise\n logger.info(\"Name Transform: \" + k + ' --> ' + newname)\n resnet_param[newname] = v\n\n if args.eval:\n eval_on_ILSVRC12(resnet_param, args.eval)\n else:\n run_test(resnet_param, args.input)\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.concat", "tensorflow.nn.log_softmax", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.train.ExponentialMovingAverage", "tensorflow.where", "tensorflow.to_int32", "tensorflow.summary.scalar", "tensorflow.add_n", "tensorflow.boolean_mask", "tensorflow.to_int64", "tensorflow.get_collection", "tensorflow.gather", "tensorflow.name_scope", "tensorflow.to_float", "tensorflow.square", "tensorflow.nn.l2_normalize", "tensorflow.gather_nd", "tensorflow.dynamic_partition", "tensorflow.op_scope", "tensorflow.one_hot", "tensorflow.add_to_collection", "tensorflow.losses.get_regularization_losses", "tensorflow.multiply", "tensorflow.nn.softmax", "tensorflow.reduce_mean", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.ones", "tensorflow.image.flip_left_right", "tensorflow.mul", "tensorflow.greater_equal" ], [ "tensorflow.nn.relu", "tensorflow.nn.softmax", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.pad", "tensorflow.variable_scope", "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
banroku/analySS
[ "15ba9e9216f86a1bf74062eae479a3ce1c9c5a11" ]
[ "drawSS.py" ]
[ "# coding=utf-8\ndef thinningSS(file, max_strain=10, interval=0.1):\n '''a function to conduct data thinning of SS curve at range (0, MAX_STRAIN), with INTERVAL\n This returns np.series of stress with strain in the index. \n FILE should be passed as dictionary containing following: \n 'name': name of sample like 'RL7785'\n 'crv': path(relative) of xxx_crv.csv file\n 'rlt': path(relative) of xxx_rlt.csv file\n 'set': path(relative) of xxx_set.csv file\n '''\n import pandas as pd\n import numpy as np\n \n # read files and parameters\n data = pd.read_csv(file['crv'], sep=',', encoding='shift_jis', skiprows=1, index_col=0)\n data_rlt = pd.read_csv(file['rlt'], sep=',', encoding='shift_jis')\n L = 64 # span\n b = float(data_rlt.iloc[2, 3]) # width of first specimen\n h = float(data_rlt.iloc[2, 4]) # height of first specimen\n #print('span, width, height of first specimen:', L, ',', b, ',', h)#cut out curve of first specimen\n col = ['mm', 'N']\n data = data.reindex(columns=col)\n data.dropna(subset=['mm'], inplace=True)\n \n #%% convert (mm, N) to (%, MPa)\n # sigma = 3*F*L / (2*b*h^2)\n # epsilon = 6*100*s*h / (L^2)\n # F: load, L:span = 64 mm, b:width, h:height, s=strain/mm\n data['strain'] = data['mm'] * 6 * 100 * h / L / L\n data['stress'] = data['N'] * 3 * L / (2 * b * h * h)\n \n #%% data thinnings\n interval_steps = int(max_strain/interval)\n marker = pd.DataFrame({'strain': np.round(np.linspace(0, max_strain, interval_steps, endpoint=False), 2), 'marker': True})\n data_marked = pd.merge(data, marker, on='strain', how='outer')\n data_marked.rename(data_marked['strain'], inplace=True)\n data_marked.sort_values(by=['strain'], inplace=True)\n data_marked.interpolate(method='slinear', limit=1, inplace=True)\n data_marked['marker'].fillna('False', inplace=True)\n data_skipped = data_marked[data_marked['marker']==True]\n thinnedSS = data_skipped['stress']\n thinnedSS.name = file['name']\n \n return thinnedSS\n\n\n#%%\ndef parameters(file):\n '''a function to pick following parameters as pd.Series: \n parameters = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break', \n 'd_width', 'd_height', 'd_FM', 'd_FS_max', 'd_FS_break', 'd_FE_max', 'd_FE_break']\n FILE should be passed as dictionary containing following: \n 'name': name of sample like 'RL7785'\n 'crv': path(relative) of xxx_crv.csv file\n 'rlt': path(relative) of xxx_rlt.csv file\n 'set': path(relative) of xxx_set.csv file '''\n\n file_rlt = file['rlt']\n data_rlt = pd.read_csv(file_rlt, sep=',', skiprows=[1,2], index_col=0, encoding='shift_jis')\n parameters = ['幅', '厚さ', '弾性率', '最大点', '破壊点', '最大点.1', '破壊点.1']\n data_rlt = data_rlt.loc[['単純平均', '標準偏差'], parameters]\n data_rlt.index = ['average', 'stdev']\n data_rlt.columns = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break']\n data_rlt = data_rlt.values\n data_flattened = [item for sublist in data_rlt for item in sublist] #see below\n parameters = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break', \n 'd_width', 'd_height', 'd_FM', 'd_FS_max', 'd_FS_break', 'd_FE_max', 'd_FE_break']\n data_rlt = pd.Series(data_flattened, index=parameters) \n data_rlt.name = file['name']\n \n return data_rlt" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.Series", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ExplosiveJam/fickettmodel-reproducibility
[ "e47af1d3e2513d35dad65c16d4fd68c23e505f87" ]
[ "bifurcation-diagram/run.py" ]
[ "#!/usr/bin/env python\nr\"\"\" Run many simulations with varying :math:`\\theta`.\n\nThe simulations are run.\nSeparate script should plot bifurcation diagram.\n\n\"\"\"\nimport argparse\nimport os\nimport sys\nimport shutil\n\nimport numpy as np\n\nfrom mpi4py import MPI\n\nfrom saf.fm.nonlinear import Config\nfrom saf.action import solve\nfrom saf.util import reset_logging\n\nTOTAL_THETAS = 251\nFINAL_TIME = 1000\nQ = 4\nIO_FORMAT = 'numpy'\n\n# Format for floating-point numbers.\nFMT = '.3f'\n\n\ndef _worker(tasks, rank):\n for t in tasks:\n _worker_single_task(t, rank)\n\n\ndef _worker_single_task(task, rank):\n theta = task\n worker_name = rank\n\n try:\n outdir = 'theta={:{fmt}}'.format(theta, fmt=FMT)\n outdir = os.path.join(OUTPUT_DIR, outdir)\n\n if os.path.exists(outdir):\n shutil.rmtree(outdir)\n os.mkdir(outdir)\n outname = os.path.join(outdir, 'stdout.log')\n errname = os.path.join(outdir, 'stderr.log')\n sys.stdout = open(outname, 'w')\n sys.stderr = open(errname, 'w')\n msg = 'Worker {} | theta={:{fmt}}'.format(worker_name, theta, fmt=FMT)\n print(msg)\n except Exception as e:\n print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))\n return\n\n try:\n c = _get_config(theta)\n solve('nonlinear', c, outdir, log_to_file=False)\n reset_logging()\n except Exception as e:\n print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))\n sys.stdout = sys.__stdout__\n print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))\n\n\ndef _get_config(theta):\n c = Config()\n\n c.n12 = N12\n c.final_time = FINAL_TIME\n c.dt = 0.005\n c.approximator = 'godunov-minmod'\n c.time_integrator = 'dopri5'\n c.plot_time_step = 0\n c.io_format = IO_FORMAT\n c.play_animation = False\n\n c.lambda_tol = 1e-6\n c.q = Q\n c.theta = theta\n c.reaction_rate_version = 'v2' # Expression exactly as in FariaEtAl2015.\n c.f = 1\n c.ic_amplitude = 0.0\n c.ic_type = 'gaussian'\n c.truncation_coef = 1e6\n\n return c\n\n\np = argparse.ArgumentParser()\np.add_argument('N12', help='Resolution', type=int)\nargs = p.parse_args()\nN12 = args.N12\nOUTPUT_DIR = os.path.join('_output', 'N12={:04d}'.format(N12))\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nall_tasks = []\n\n# Build `all_tasks` in master process to distribute it to all processes.\nif rank == 0:\n # Uniformly spaced values of :math:`\\theta`.\n theta_values = np.linspace(0.90, 1.15, num=TOTAL_THETAS)\n\n for i in range(size):\n all_tasks.append([])\n\n for i in range(len(theta_values)):\n all_tasks[i % size].append(theta_values[i])\n\n# Now distribute the tasks to each process.\ntasks = comm.scatter(all_tasks, root=0)\n_worker(tasks, rank)\n" ]
[ [ "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ec-better/ewf-ethz-03-01-01
[ "5ca616e5c25bbba29013a7de248af4b69757921b" ]
[ "src/main/app-resources/notebook/libexec/helpers.py" ]
[ "# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport cv2\nimport re\n\nfrom shapely import wkt\nfrom shapely.geometry import box, Polygon\nimport pandas as pd\nimport geopandas as gpd\n\nfrom osgeo import gdal, gdalnumeric, osr, ogr\n\ndef ensure_dir(file_path):\n directory = os.path.dirname(file_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\ndef getResolution(demFolder, return_full_paths = False):\n rasterFilePaths = [f for f in os.listdir(demFolder) if os.path.isfile(os.path.join(demFolder, f))]\n \n if return_full_paths:\n rasterFilePaths = [demFolder + '/' + f for f in rasterFilePaths if f[:4] == 'DEM_' and f[-4:] == '.tif']\n rasterFilePaths.sort(reverse=True)\n else:\n rasterFilePaths = [int(f[4:-4]) for f in rasterFilePaths if f[:4] == 'DEM_' and f[-4:] == '.tif']\n\n return rasterFilePaths\n\ndef readGDAL2numpy(rasterPath, return_geoInformation = False):\n try:\n ds = gdal.Open(rasterPath)\n except RuntimeError:\n print('Unable to open input file')\n sys.exit(1)\n \n data = gdalnumeric.LoadFile(rasterPath, False)\n noDataVal = ds.GetRasterBand(1).GetNoDataValue()\n try:\n if data.dtype in ['float16', 'float32', 'float64'] and noDataVal is not None:\n data[data == noDataVal] = np.NaN\n except:\n print(\"Issue in no data value\")\n \n \n if return_geoInformation == False:\n return data\n else:\n geoTransform = ds.GetGeoTransform()\n projection = ds.GetProjection() \n return data, geoTransform, projection\n\ndef writeNumpyArr2Geotiff(outputPath, data, geoTransform = None, projection = None, GDAL_dtype = gdal.GDT_Byte, noDataValue = None):\n nscn, npix = data.shape\n \n if np.isnan(data).any() and noDataValue is not None:\n data[np.isnan(data)] = noDataValue\n \n ds_new = gdal.GetDriverByName('GTiff').Create(outputPath, npix, nscn, 1, GDAL_dtype)\n \n if geoTransform != None:\n ds_new.SetGeoTransform(geoTransform)\n \n if projection != None:\n ds_new.SetProjection(projection) \n \n outBand = ds_new.GetRasterBand(1)\n outBand.WriteArray(data)\n \n if noDataValue != None:\n ds_new.GetRasterBand(1).SetNoDataValue(noDataValue)\n \n # Close dataset\n ds_new.FlushCache()\n ds_new = None\n outBand = None\n \ndef writeNumpyArr2Saga(outputPath, data, geoTransform = None, projection = None, GDAL_dtype = gdal.GDT_Byte, noDataValue = None):\n nscn, npix = data.shape\n \n if np.isnan(data).any() and noDataValue is not None:\n data[np.isnan(data)] = noDataValue\n \n ds_new = gdal.GetDriverByName('SAGA').Create(outputPath, npix, nscn, 1, GDAL_dtype) \n \n outBand = ds_new.GetRasterBand(1)\n outBand.WriteArray(data)\n \n if noDataValue != None:\n ds_new.GetRasterBand(1).SetNoDataValue(noDataValue)\n \n if projection != None:\n ds_new.SetProjection(projection) \n \n # Close dataset\n ds_new.FlushCache()\n ds_new = None\n outBand = None\n\ndef wkt2bbox(wkt_input):\n wkt_geometry = wkt.loads(wkt_input)\n minx, miny, maxx, maxy = wkt_geometry.bounds\n b = box(minx, miny, maxx, maxy)\n bbox_tuple = list(b.exterior.coords)\n bbox = []\n for point in bbox_tuple:\n bbox.append([point[0],point[1]])\n return bbox\n\ndef wkt2shp(wkt_input, target_epsg, dst_file, bbox=False):\n ensure_dir(dst_file)\n if bbox:\n polygon = Polygon(wkt2bbox(wkt_input))\n else:\n polygon = wkt.loads(wkt_input)\n gpd.GeoDataFrame(pd.DataFrame(['p1'], columns = ['geom']),\n crs = {'init':'epsg:' + str(target_epsg)},\n geometry = [polygon]).to_file(dst_file)\n \ndef rescaleDEM(image, noData = None, maxVal = 255):\n if noData:\n image = np.float32(image)\n image[image == noData] = np.nan\n \n minElev = 
np.nanmin(image)\n maxElev = np.nanmax(image)\n \n rescaled = ( ((image - minElev)/(maxElev- minElev)) * (maxVal - 1) ) + 1\n return np.uint8(rescaled)\n\ndef joinStrArg(str1, str2, str3 = None):\n if str3 is not None:\n return str(str1) + ' ' + str(str2) + ' ' + str(str3)\n else:\n return str(str1) + ' ' + str(str2) \n\ndef wkt2EPSG(wkt, epsg='/usr/local/share/proj/epsg', forceProj4=False):\n ''' \n Transform a WKT string to an EPSG code\n \n Arguments\n ---------\n \n wkt: WKT definition\n epsg: the proj.4 epsg file (defaults to '/usr/local/share/proj/epsg')\n forceProj4: whether to perform brute force proj4 epsg file check (last resort)\n \n Returns: EPSG code\n \n '''\n code = None\n p_in = osr.SpatialReference()\n s = p_in.ImportFromWkt(wkt)\n if s == 5: # invalid WKT\n return None\n if p_in.IsLocal() == 1: # this is a local definition\n return p_in.ExportToWkt()\n if p_in.IsGeographic() == 1: # this is a geographic srs\n cstype = 'GEOGCS'\n else: # this is a projected srs\n cstype = 'PROJCS'\n an = p_in.GetAuthorityName(cstype)\n ac = p_in.GetAuthorityCode(cstype)\n if an is not None and ac is not None: # return the EPSG code\n return '%s:%s' % \\\n (p_in.GetAuthorityName(cstype), p_in.GetAuthorityCode(cstype))\n else: # try brute force approach by grokking proj epsg definition file\n p_out = p_in.ExportToProj4()\n if p_out:\n if forceProj4 is True:\n return p_out\n f = open(epsg)\n for line in f:\n if line.find(p_out) != -1:\n m = re.search('<(\\\\d+)>', line)\n if m:\n code = m.group(1)\n break\n if code: # match\n return 'EPSG:%s' % code\n else: # no match\n return None\n else:\n return None\n \ndef getCornerCoordinates(gdal_dataSet, target_srs = False):\n \"\"\"\n :param gdal_dataSet: /path/to/file OR gdal dataset\n :param target_srs: False for output coordinates in same coordinate system OR 'wgs84' for lat long values OR custom osr.SpatialReference() object\n :return: list of corner coordinates\n\n --0--------3--\n | |\n | | <--- Index of coordinates returned in list\n | |\n --1--------2--\n \"\"\"\n\n\n if type(gdal_dataSet) is str:\n gdal_dataSet = gdal.Open(gdal_dataSet)\n\n gt=gdal_dataSet.GetGeoTransform() # gt = [ulx, xres, xskew, uly, yskew, yres]\n cols = gdal_dataSet.RasterXSize\n rows = gdal_dataSet.RasterYSize\n\n def GetExtent(gt,cols,rows):\n ''' Return list of corner coordinates from a geotransform\n @type gt: C{tuple/list}\n @param gt: geotransform\n @type cols: C{int}\n @param cols: number of columns in the dataset\n @type rows: C{int}\n @param rows: number of rows in the dataset\n @rtype: C{[float,...,float]}\n @return: coordinates of each corner\n '''\n ext=[]\n xarr=[0,cols]\n yarr=[0,rows]\n\n for px in xarr:\n for py in yarr:\n x=gt[0]+(px*gt[1])+(py*gt[2])\n y=gt[3]+(px*gt[4])+(py*gt[5])\n ext.append([x,y])\n #print(x,y)\n yarr.reverse()\n return ext\n\n def ReprojectCoords(coords,src_srs,tgt_srs):\n ''' Reproject a list of x,y coordinates.\n\n @type geom: C{tuple/list}\n @param geom: List of [[x,y],...[x,y]] coordinates\n @type src_srs: C{osr.SpatialReference}\n @param src_srs: OSR SpatialReference object\n @type tgt_srs: C{osr.SpatialReference}\n @param tgt_srs: OSR SpatialReference object\n @rtype: C{tuple/list}\n @return: List of transformed [[x,y],...[x,y]] coordinates\n '''\n trans_coords=[]\n transform = osr.CoordinateTransformation( src_srs, tgt_srs)\n for x,y in coords:\n x,y,z = transform.TransformPoint(x,y)\n trans_coords.append([x,y])\n return trans_coords\n\n ext = GetExtent(gt,cols,rows)\n\n src_srs=osr.SpatialReference()\n 
src_srs.ImportFromWkt(gdal_dataSet.GetProjection())\n\n if target_srs == False:\n return ext\n elif target_srs == 'wgs84':\n #target_srs = src_srs.CloneGeogCS()\n #\n target_srs=osr.SpatialReference()\n target_srs.ImportFromEPSG(4326)\n\n return ReprojectCoords(ext,src_srs,target_srs)\n\ndef resizeToDEM(imPath, sizeDEM = None, geoTransform = None, projection = None, noData = None):\n imDS = gdal.Open(imPath, gdal.GA_ReadOnly)\n imPix = imDS.RasterXSize\n imScn = imDS.RasterYSize\n \n nscn, npix = sizeDEM\n \n if sizeDEM is not None:\n if nscn != imScn or npix != imPix:\n print(\"Size Mismatch\")\n image = imDS.ReadAsArray()\n if noData is not None:\n image = np.float32(image)\n image[image == noData] = np.nan\n imNew = cv2.resize(image, (npix, nscn), interpolation=cv2.INTER_CUBIC)\n \n writeNumpyArr2Geotiff(imPath, imNew, geoTransform = geoTransform, projection = projection, GDAL_dtype = gdal.GDT_UInt16, noDataValue = noData)\n \ndef map_uint16_to_uint8(img, lower_bound=None, upper_bound=None):\n '''\n Map a 16-bit image trough a lookup table to convert it to 8-bit.\n\n '''\n if not(0 <= lower_bound < 2**16) and lower_bound is not None:\n raise ValueError(\n '\"lower_bound\" must be in the range [0, 65535]')\n if not(0 <= upper_bound < 2**16) and upper_bound is not None:\n raise ValueError(\n '\"upper_bound\" must be in the range [0, 65535]')\n if lower_bound is None:\n lower_bound = np.min(img)\n if upper_bound is None:\n upper_bound = np.max(img)\n if lower_bound >= upper_bound:\n raise ValueError(\n '\"lower_bound\" must be smaller than \"upper_bound\"')\n lut = np.concatenate([\n np.zeros(lower_bound, dtype=np.uint16),\n np.linspace(0, 255, upper_bound - lower_bound).astype(np.uint16),\n np.ones(2**16 - upper_bound, dtype=np.uint16) * 255\n ])\n return lut[img].astype(np.uint8) \n\ndef closeCV(mask, kernelSize = 11):\n kernel = np.ones((kernelSize, kernelSize),np.uint8)\n return cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n\ndef newGeoTransform(geoTransform, maskBounds):\n\tnewGeoTransform = (geoTransform[0]+ maskBounds['xMin'] * geoTransform[1],\n geoTransform[1],\n geoTransform[2],\n geoTransform[3] + maskBounds['yMin'] * geoTransform[5],\n geoTransform[4],\n geoTransform[5]) \n\treturn newGeoTransform\n\ndef shrinkGeoTransform(geoTransform, factor):\n\tnewGeoTransform = (geoTransform[0],\n geoTransform[1] / factor,\n geoTransform[2],\n geoTransform[3],\n geoTransform[4],\n geoTransform[5] / factor) \n\treturn newGeoTransform\n" ]
[ [ "numpy.nanmax", "numpy.linspace", "numpy.min", "numpy.isnan", "numpy.uint8", "numpy.nanmin", "pandas.DataFrame", "numpy.ones", "numpy.max", "numpy.float32", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
eivindeb/gym-letMPC
[ "7041aa56a25aa9a1c749088f2b370c910d21fe75" ]
[ "gym_let_mpc/let_mpc.py" ]
[ "import gym\nfrom gym.utils import seeding\nimport numpy as np\nimport json\nfrom gym_let_mpc.simulator import ControlSystem\nfrom gym_let_mpc.controllers import ETMPC, AHMPC\nimport collections.abc\nimport matplotlib.pyplot as plt\nfrom gym_let_mpc.utils import str_replace_whole_words\nimport copy\n\n\nclass LetMPCEnv(gym.Env):\n def __init__(self, config_path):\n with open(config_path) as file_object:\n config = json.load(file_object)\n\n if config[\"mpc\"][\"model\"] == \"plant\":\n config[\"mpc\"][\"model\"] = copy.deepcopy(config[\"plant\"][\"model\"])\n elif config[\"mpc\"][\"model\"].get(\"parameters\", None) == \"plant\":\n config[\"mpc\"][\"model\"][\"parameters\"] = copy.deepcopy(config[\"plant\"][\"model\"][\"parameters\"])\n\n if config[\"lqr\"][\"model\"] == \"plant\":\n config[\"lqr\"][\"model\"] = copy.deepcopy(config[\"plant\"][\"model\"])\n elif config[\"lqr\"][\"model\"] == \"mpc\":\n config[\"lqr\"][\"model\"] = copy.deepcopy(config[\"mpc\"][\"model\"])\n elif config[\"lqr\"][\"model\"].get(\"parameters\", None) == \"plant\":\n config[\"lqr\"][\"model\"][\"parameters\"] = copy.deepcopy(config[\"plant\"][\"model\"][\"parameters\"])\n elif config[\"lqr\"][\"model\"].get(\"parameters\", None) == \"mpc\":\n config[\"lqr\"][\"model\"][\"parameters\"] = copy.deepcopy(config[\"mpc\"][\"model\"][\"parameters\"])\n\n self.config = config\n assert \"max_steps\" in self.config[\"environment\"]\n self.max_steps = self.config[\"environment\"][\"max_steps\"]\n\n assert \"randomize\" in self.config[\"environment\"]\n assert \"state\" in self.config[\"environment\"][\"randomize\"] and \"reference\" in self.config[\"environment\"][\"randomize\"]\n assert \"render\" in self.config[\"environment\"]\n if config[\"mpc\"][\"type\"] == \"ETMPC\":\n assert len(config[\"environment\"][\"action\"][\"variables\"]) == 1 and \\\n config[\"environment\"][\"action\"][\"variables\"][0][\"name\"] == \"mpc_compute\"\n controller = ETMPC(config[\"mpc\"], config[\"lqr\"])\n self.action_space = gym.spaces.Discrete(2)\n elif config[\"mpc\"][\"type\"] == \"AHMPC\":\n assert len(config[\"environment\"][\"action\"][\"variables\"]) == 1 and \\\n config[\"environment\"][\"action\"][\"variables\"][0][\"name\"] == \"mpc_horizon\"\n controller = AHMPC(config[\"mpc\"])\n self.action_space = gym.spaces.Box(low=np.array([1]), high=np.array([50]), dtype=np.float32)\n else:\n raise ValueError\n self.control_system = ControlSystem(config[\"plant\"], controller=controller)\n self.history = None\n self.steps_count = None\n self.np_random = None\n self.min_constraint_delta = 0.25 # TODO: how and where to set\n\n obs_high = []\n obs_low = []\n for obs_var in self.config[\"environment\"][\"observation\"][\"variables\"]:\n for var_transform in obs_var.get(\"transform\", [\"none\"]):\n for lim_i, lim in enumerate(obs_var.get(\"limits\", [None, None])):\n if lim is None:\n if lim_i == 0:\n obs_low.append(-np.finfo(np.float32).max)\n else:\n obs_high.append(np.finfo(np.float32).max)\n else:\n if var_transform == \"none\":\n if lim_i == 0:\n obs_low.append(lim)\n else:\n obs_high.append(lim)\n elif var_transform == \"absolute\":\n if lim_i == 0:\n obs_low.append(0)\n else:\n obs_high.append(lim)\n elif var_transform == \"square\":\n if lim_i == 0:\n obs_low.append(0)\n else:\n obs_high.append(lim ** 2)\n else:\n raise NotImplementedError\n self.observation_space = gym.spaces.Box(low=np.array(obs_low, dtype=np.float32),\n high=np.array(obs_high, dtype=np.float32),\n dtype=np.float32)\n\n self.value_function_is_set = False\n\n 
self.viewer = None\n\n def seed(self, seed=None):\n \"\"\"\n Seed the random number generator of the control system.\n :param seed: (int) seed for random state\n \"\"\"\n self.np_random, seed = gym.utils.seeding.np_random(seed)\n self.control_system.seed(seed)\n return [seed]\n\n def reset(self, state=None, reference=None, constraint=None, model=None, process_noise=None, tvp=None):\n \"\"\"\n Reset state of environment. Note that the simulator is reset, the MPC solution is computed and the first\n MPC action is applied to the plant.\n\n :param state: (dict) initial conditions (value) for state name (key).\n :param reference: (dict) reference value (value) for reference name (key).\n :param constraint: (dict) constraint values (value) for constraint names (key).\n :param model: (dict) dictionary of dictionary where first key is model that it applies to [\"plant\", \"mpc\", \"lqr\"],\n first value is dictionary of model parameters where second value is the specified model parameter value.\n :param process_noise: (dict) process noise values (value) as ndarray for state name (key). The process noise at\n each time step loops through the provided array.\n :param tvp: (dict) values of time-varying parameters. New values are generated if values arent specified\n for all time steps elapsed.\n :return: ([float]) observation vector\n \"\"\"\n def update_dict_recursively(d, u):\n for k, v in u.items():\n if isinstance(v, collections.abc.Mapping):\n d[k] = update_dict_recursively(d.get(k, {}), v)\n else:\n d[k] = v\n return d\n\n sampled_state = self.sample_state()\n sampled_reference = self.sample_reference()\n sampled_constraint = self.sample_constraints()\n sampled_model = self.sample_model()\n\n if state is not None:\n sampled_state.update(state)\n elif len(sampled_state) == 0:\n sampled_state = None\n if reference is not None:\n sampled_reference.update(reference)\n elif len(sampled_reference) == 0:\n sampled_reference = None\n if constraint is not None:\n sampled_constraint.update(constraint)\n elif len(sampled_constraint) == 0:\n sampled_constraint = None\n if model is not None:\n sampled_model = update_dict_recursively(sampled_model, model)\n elif len(sampled_model) == 0:\n sampled_model = None\n self.control_system.reset(state=sampled_state, reference=sampled_reference, constraint=sampled_constraint,\n model=sampled_model, process_noise=process_noise, tvp=tvp)\n if self.config[\"mpc\"][\"type\"] == \"ETMPC\":\n self.control_system.step(action=np.array([1]))\n obs = self.get_observation()\n self.history = {\"obs\": [obs], \"actions\": [], \"rewards\": []}\n self.steps_count = 0\n\n return obs\n\n def step(self, action):\n a_dict = {a_props[\"name\"]: action[a_i]\n for a_i, a_props in enumerate(self.config[\"environment\"][\"action\"][\"variables\"])}\n\n self.control_system.step(np.round(a_dict[\"mpc_horizon\"]).astype(np.int32))#np.atleast_1d(int(a_dict[\"mpc_compute\"])))\n self.history[\"actions\"].append(a_dict)\n self.steps_count += 1\n\n info = {}\n obs = self.get_observation()\n done = False\n if self.steps_count >= self.max_steps:\n done = True\n info[\"termination\"] = \"steps\"\n elif len(self.config[\"environment\"].get(\"end_on_constraint_violation\", [])) > 0:\n for c_name, c_d in self.control_system.get_constraint_distances().items():\n if c_name.split(\"-\")[1] in self.config[\"environment\"][\"end_on_constraint_violation\"] and c_d > 0:\n done = True\n info[\"termination\"] = \"constraint\"\n break\n\n rew = self.get_reward(done=done)\n for category, v in 
self.config[\"environment\"].get(\"info\", {}).items():\n if category == \"reward\":\n for rew_name, rew_expr in v.items():\n info[\"reward/{}\".format(rew_name)] = self.get_reward(rew_expr, done=done)\n else:\n raise NotImplementedError\n\n if self.value_function_is_set:\n step_vf_data = {\"mpc_state\": self.control_system.get_state_vector(self.control_system.history[\"state\"][-2]),\n \"mpc_next_state\": self.control_system.controller.mpc_state_preds[:, -1, -1]}\n step_vf_data[\"mpc_n_horizon\"] = self.control_system.controller.history[\"mpc_horizon\"][-1]\n info[\"mpc_value_fn\"] = (self.control_system.controller.value_function.eval([step_vf_data[\"mpc_next_state\"].reshape(1, -1)])[0][0, 0]).astype(np.float64)\n step_vf_data[\"mpc_rewards\"] = self.control_system.controller.mpc.opt_f_num.toarray()[0, 0] - \\\n self.config[\"mpc\"][\"objective\"].get(\"discount_factor\") ** (step_vf_data[\"mpc_n_horizon\"] + 1) * info[\"mpc_value_fn\"]\n info[\"mpc_computation_time\"] = sum([v for k, v in self.control_system.controller.mpc.solver_stats.items() if k.startswith(\"t_proc\")])\n info[\"data\"] = step_vf_data\n info[\"mpc_avg_stage_cost\"] = step_vf_data[\"mpc_rewards\"] / step_vf_data[\"mpc_n_horizon\"]\n\n info.update({k: v.astype(np.float64) if hasattr(v, \"dtype\") else v for k, v in a_dict.items()})\n\n self.history[\"obs\"].append(obs)\n self.history[\"rewards\"].append(rew)\n\n return obs, rew, done, info\n\n def render(self, mode='human', save_path=None): # TODO: add env renders\n figure, axes = None, None\n if self.viewer is None:\n env_plots = [plot_name for plot_name, make_plot in self.config[\"environment\"][\"render\"].items() if make_plot]\n if len(env_plots) > 0:\n figure, axes = plt.subplots(self.control_system.render_n_axes + len(env_plots), sharex=True,\n figsize=(9, 16))\n self.viewer = self.control_system.render(figure=figure, axes=axes, return_viewer=True)\n for i, plot in enumerate(env_plots):\n self.viewer[\"axes\"][plot] = axes[-(i + 1)]\n else:\n self.viewer = self.control_system.render(figure=figure, axes=axes, return_viewer=True)\n for plot_name, make_plot in self.config[\"environment\"][\"render\"].items():\n if make_plot:\n self.viewer[\"axes\"][plot_name].set_ylabel(\"-\".join(plot_name.split(\"_\")[1:]))\n x_data = np.array(range(self.steps_count)) * self.control_system.config[\"params\"][\"t_step\"]\n self.viewer[\"axes\"][plot_name].clear()\n if plot_name == \"plot_action\":\n for a_var in self.config[\"environment\"][\"action\"][\"variables\"]:\n y_data = [step_a[a_var[\"name\"]] for step_a in self.history[\"actions\"]]\n self.viewer[\"axes\"][plot_name].plot(x_data, y_data, label=a_var[\"name\"], drawstyle=\"steps\")\n elif plot_name == \"plot_reward\":\n self.viewer[\"axes\"][plot_name].plot(x_data, self.history[\"rewards\"], label=\"reward\")\n self.viewer[\"axes\"][plot_name].text(max(x_data) + self.control_system.config[\"params\"][\"t_step\"],\n self.history[\"rewards\"][-1],\n \"{:.3f}\".format(np.sum(self.history[\"rewards\"])))\n else:\n raise ValueError\n for axis in self.viewer[\"axes\"].values():\n axis.legend()\n if save_path is not None:\n self.viewer[\"figure\"].savefig(save_path, bbox_inches=\"tight\", format=\"png\")\n plt.close(self.viewer[\"figure\"])\n else:\n self.viewer[\"figure\"].show()\n\n def get_observation(self):\n obs = []\n for var in self.config[\"environment\"][\"observation\"][\"variables\"]:\n var_val = self._get_variable_value(var)\n for transform in var.get(\"transform\", [\"none\"]):\n if transform == \"none\":\n 
obs.append(var_val)\n elif transform == \"absolute\":\n obs.append(abs(var_val))\n elif transform == \"square\":\n obs.append(var_val ** 2)\n else:\n raise ValueError\n\n return np.array(obs)\n\n def get_reward(self, rew_expr=None, done=False):\n if rew_expr is None:\n rew_expr = self.config[\"environment\"][\"reward\"][\"expression\"]\n\n rew_expr = str_replace_whole_words(rew_expr, \"done\", int(done))\n\n for var in sorted(self.config[\"environment\"][\"reward\"][\"variables\"], key=lambda x: len(x), reverse=True):\n var_val = self._get_variable_value(var)\n if isinstance(var_val, list) or isinstance(var_val, np.ndarray): # TODO: needs to be better way to do this\n var_val = var_val[0]\n rew_expr = str_replace_whole_words(rew_expr, var[\"name\"], var_val)\n\n return eval(rew_expr)\n\n def _get_variable_value(self, var):\n if var[\"type\"] == \"state\":\n val = self.control_system.current_state[var[\"name\"]]\n elif var[\"type\"] == \"input\":\n if var.get(\"value_type\", \"absolute\") == \"absolute\":\n val = self.control_system.controller.current_input[var[\"name\"]]\n elif var.get(\"value_type\") == \"delta\":\n val = self.control_system.controller.history[\"inputs\"][-2][var[\"name\"]] - \\\n self.control_system.controller.current_input[var[\"name\"]]\n else:\n raise ValueError\n elif var[\"type\"] == \"reference\":\n val = self.control_system.controller.current_reference[var[\"name\"]]\n elif var[\"type\"] == \"tvp\":\n val = self.control_system.tvps[var[\"name\"]].get_values(self.steps_count)\n elif var[\"type\"] == \"error\":\n val = self.control_system.controller.history[\"errors\"][-1][var[\"name\"]]\n if np.isnan(val):\n val = 0\n elif var[\"type\"] == \"epsilon\":\n val = self.control_system.controller.history[\"epsilons\"][-1][var[\"name\"]]\n if np.isnan(val):\n val = 0\n elif var[\"type\"] == \"constraint\":\n if var.get(\"value_type\") == \"distance\":\n val = self.control_system.get_constraint_distances((var[\"name\"],))[var[\"name\"]]\n else:\n raise ValueError\n elif var[\"type\"] == \"action\":\n if var.get(\"value_type\", \"agent\") == \"agent\":\n val = self.history[\"actions\"][-1][var[\"name\"]]\n elif var.get(\"value_type\") == \"controller\":\n val = self.control_system.controller.history[var[\"name\"]][-1]\n else:\n raise ValueError\n elif var[\"type\"] == \"time\":\n if var.get(\"value_type\") == \"fraction\":\n val = self.control_system.controller.steps_since_mpc_computation / self.control_system.controller.mpc.n_horizon\n elif var.get(\"value_type\") == \"absolute\":\n val = self.control_system.controller.steps_since_mpc_computation\n else:\n raise ValueError\n elif var[\"type\"] == \"parameter\":\n if var[\"value_type\"] in [\"plant\", \"mpc\", \"lqr\"]:\n val = self.config[var[\"value_type\"]][\"model\"][\"parameters\"][var[\"name\"]]\n else:\n raise ValueError\n else:\n raise ValueError\n\n if isinstance(val, np.ndarray):\n val = val[0]\n if \"limits\" in var:\n val = np.clip(val, var[\"limits\"][0], var[\"limits\"][1])\n\n return val\n\n def sample_constraints(self):\n constraints = {}\n for c_name, c_props in self.config[\"environment\"].get(\"randomize\", {}).get(\"constraints\", {}).items():\n constraint_val = getattr(self.np_random, c_props[\"type\"])(**c_props[\"kw\"])\n if c_name.split(\"-\")[1] in [k.split(\"-\")[1] for k in constraints.keys()]:\n other_bound_type = \"u\" if c_name.split(\"-\")[2] == \"l\" else \"l\"\n other_bound_val = constraints[c_name[:-1] + other_bound_type]\n if other_bound_type == \"u\":\n constraint_val = 
min(other_bound_val - self.min_constraint_delta, constraint_val)\n else:\n constraint_val = max(other_bound_val + self.min_constraint_delta, constraint_val)\n constraints[c_name] = constraint_val\n return constraints\n\n def sample_state(self):\n state = {}\n for s_name, s_props in self.config[\"environment\"].get(\"randomize\", {}).get(\"state\", {}).items():\n state[s_name] = getattr(self.np_random, s_props[\"type\"])(**s_props[\"kw\"])\n\n return state\n\n def sample_reference(self):\n reference = {}\n for r_name, r_props in self.config[\"environment\"].get(\"randomize\", {}).get(\"reference\", {}).items():\n reference[r_name] = getattr(self.np_random, r_props[\"type\"])(**r_props[\"kw\"])\n\n return reference\n\n def sample_model(self):\n model = {}\n for s_name, s_props in self.config[\"environment\"].get(\"randomize\", {}).get(\"model\", {}).get(\"states\", {}).items():\n model[\"states\"] = {s_name: {}}\n for component_name, component_props in s_props.items():\n model[\"states\"][s_name][component_name] = \\\n {comp_v_name: getattr(self.np_random, v_prop[\"type\"])(**v_prop[\"kw\"])\n for comp_v_name, v_prop in component_props.items()}\n\n model = {dest: model for dest in self.config[\"environment\"].get(\"randomize\", {}).get(\"model\", {}).get(\"apply\", [])}\n return model\n\n def stop(self):\n pass\n\n def create_dataset(self, n_scenarios):\n dataset = []\n self.reset()\n for i in range(n_scenarios):\n process_noise = np.array([self.control_system._get_process_noise() for i in range(self.max_steps)])\n ep_dict = {\"state\": self.sample_state(), \"reference\": self.sample_reference(),\n \"constraint\": self.sample_constraints(), \"model\": self.sample_model(),\n \"process_noise\": {}, \"tvp\": {}}\n s_i = 0\n for s_name, s_props in self.config[\"plant\"][\"model\"][\"states\"].items():\n if \"W\" in s_props:\n ep_dict[\"process_noise\"][s_name] = process_noise[:, s_i]\n s_i += 1\n for tvp_name, tvp_obj in self.control_system.tvps.items():\n tvp_obj.generate_values(self.max_steps)\n ep_dict[\"tvp\"][tvp_name] = tvp_obj.values\n dataset.append(ep_dict)\n self.reset()\n\n return dataset\n\n def set_value_function(self, input_ph, output_ph, tf_session):\n self.control_system.controller.set_value_function(input_ph, output_ph, tf_session)\n self.value_function_is_set = True\n\n def set_learning_status(self, status):\n if self.value_function_is_set:\n self.control_system.controller.value_function.set_enabled(status)\n\n\nif __name__ == \"__main__\": # TODO: constraints on pendulum and end episode if constraints violated\n env = LetMPCEnv(\"configs/cart_pendulum_horizon.json\")\n env.seed(0)\n\n \"\"\"\n from tensorflow_casadi import TensorFlowEvaluator, MLP\n import tensorflow as tf\n a = tf.placeholder(shape=(None, 4), dtype=tf.float32)\n mlp = MLP(a)\n sess = tf.Session()\n val_fun = TensorFlowEvaluator([mlp.input_ph], [mlp.output], sess)\n env.set_value_function(mlp.input_ph, mlp.output, sess)\n \"\"\"\n\n import pickle\n with open(\"../../lmpc-horizon/datasets/cart_pendulum_10.pkl\", \"rb\") as f:\n test_set = pickle.load(f)\n\n rews = {}\n\n for i in range(1):\n import time\n obs = env.reset(**test_set[5])\n\n done = False\n t_before = time.process_time()\n horizon = 10\n while not done:\n t_step = time.process_time()\n if env.steps_count % 1 == 0 and False:\n horizon = 25 if horizon == 50 else 50\n obs, rew, done, info = env.step([horizon])#[np.random.randint(1, 10)])\n for rew_comp, v in info.items():\n if rew_comp.startswith(\"reward/\"):\n if rew_comp not in rews:\n 
rews[rew_comp] = []\n rews[rew_comp].append(v)\n if time.process_time() - t_step > 1:\n print(env.control_system.controller.mpc.solver_stats)\n print(env.steps_count)\n\n for k, v in rews.items():\n print(\"{}: {}\".format(k, sum(v)))\n print(\"Elapsed time {}\".format(time.process_time() - t_before))\n env.render()\n\n\n \n\n\n\n" ]
[ [ "numpy.clip", "numpy.isnan", "numpy.finfo", "numpy.round", "matplotlib.pyplot.close", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vishalbelsare/estimagic
[ "afae1be3a1566056d11962c495b67e64bc4a0822", "afae1be3a1566056d11962c495b67e64bc4a0822", "afae1be3a1566056d11962c495b67e64bc4a0822", "afae1be3a1566056d11962c495b67e64bc4a0822" ]
[ "estimagic/tests/differentiation/test_derivatives.py", "estimagic/optimization/nlopt_optimizers.py", "estimagic/tests/test_utilities.py", "estimagic/visualization/derivative_plot.py" ]
[ "from functools import partial\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom pandas.testing import assert_frame_equal\nfrom scipy.optimize._numdiff import approx_derivative\n\nfrom estimagic.differentiation.derivatives import _consolidate_one_step_derivatives\nfrom estimagic.differentiation.derivatives import _convert_evaluation_data_to_frame\nfrom estimagic.differentiation.derivatives import (\n _convert_richardson_candidates_to_frame,\n)\nfrom estimagic.differentiation.derivatives import _nan_skipping_batch_evaluator\nfrom estimagic.differentiation.derivatives import _select_minimizer_along_axis\nfrom estimagic.differentiation.derivatives import first_derivative\nfrom estimagic.examples.numdiff_functions import logit_loglike\nfrom estimagic.examples.numdiff_functions import logit_loglike_gradient\nfrom estimagic.examples.numdiff_functions import logit_loglikeobs\nfrom estimagic.examples.numdiff_functions import logit_loglikeobs_jacobian\nfrom estimagic.utilities import namedtuple_from_kwargs\n\n\[email protected]\ndef binary_choice_inputs():\n fix_path = Path(__file__).resolve().parent / \"binary_choice_inputs.pickle\"\n inputs = pd.read_pickle(fix_path)\n return inputs\n\n\nmethods = [\"forward\", \"backward\", \"central\"]\n\n\[email protected](\"method\", methods)\ndef test_first_derivative_jacobian(binary_choice_inputs, method):\n fix = binary_choice_inputs\n func = partial(logit_loglikeobs, y=fix[\"y\"], x=fix[\"x\"])\n\n calculated = first_derivative(\n func=func,\n method=method,\n params=fix[\"params_np\"],\n n_steps=1,\n base_steps=None,\n lower_bounds=np.full(fix[\"params_np\"].shape, -np.inf),\n upper_bounds=np.full(fix[\"params_np\"].shape, np.inf),\n min_steps=1e-8,\n step_ratio=2.0,\n f0=func(fix[\"params_np\"]),\n n_cores=1,\n )\n\n expected = logit_loglikeobs_jacobian(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n\n aaae(calculated[\"derivative\"], expected, decimal=6)\n\n\ndef test_first_derivative_jacobian_works_at_defaults(binary_choice_inputs):\n fix = binary_choice_inputs\n func = partial(logit_loglikeobs, y=fix[\"y\"], x=fix[\"x\"])\n calculated = first_derivative(func=func, params=fix[\"params_np\"], n_cores=1)\n expected = logit_loglikeobs_jacobian(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n aaae(calculated[\"derivative\"], expected, decimal=6)\n\n\[email protected](\"method\", methods)\ndef test_first_derivative_gradient(binary_choice_inputs, method):\n fix = binary_choice_inputs\n func = partial(logit_loglike, y=fix[\"y\"], x=fix[\"x\"])\n\n calculated = first_derivative(\n func=func,\n method=method,\n params=fix[\"params_np\"],\n n_steps=1,\n f0=func(fix[\"params_np\"]),\n n_cores=1,\n )\n\n expected = logit_loglike_gradient(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n\n aaae(calculated[\"derivative\"], expected, decimal=4)\n\n\[email protected](\"method\", methods)\ndef test_first_derivative_scalar(method):\n def f(x):\n return x ** 2\n\n calculated = first_derivative(f, 3.0, n_cores=1)\n expected = 6.0\n assert calculated[\"derivative\"] == expected\n\n\[email protected](\"method\", methods)\ndef test_first_derivative_scalar_with_return_func_value(method):\n def f(x):\n return x ** 2\n\n calculated = first_derivative(\n f, 3.0, return_func_value=True, return_info=False, n_cores=1\n )\n expected = {\"derivative\": 6.0, \"func_value\": 9.0}\n assert calculated == expected\n\n\ndef test_nan_skipping_batch_evaluator():\n arglist = [np.nan, np.ones(2), 
np.array([3, 4]), np.nan, np.array([1, 2])]\n expected = [\n np.full(2, np.nan),\n np.ones(2),\n np.array([9, 16]),\n np.full(2, np.nan),\n np.array([1, 4]),\n ]\n calculated = _nan_skipping_batch_evaluator(\n func=lambda x: x ** 2,\n arguments=arglist,\n n_cores=1,\n error_handling=\"continue\",\n batch_evaluator=\"joblib\",\n )\n for arr_calc, arr_exp in zip(calculated, expected):\n if np.isnan(arr_exp).all():\n assert np.isnan(arr_calc).all()\n else:\n aaae(arr_calc, arr_exp)\n\n\ndef test_consolidate_one_step_derivatives():\n forward = np.ones((1, 4, 3))\n forward[:, :, 0] = np.nan\n backward = np.zeros_like(forward)\n\n calculated = _consolidate_one_step_derivatives(\n {\"forward\": forward, \"backward\": backward}, [\"forward\", \"backward\"]\n )\n expected = np.array([[0, 1, 1]] * 4)\n aaae(calculated, expected)\n\n\[email protected]()\ndef example_function_gradient_fixtures():\n def f(x):\n \"\"\"f:R^3 -> R\"\"\"\n x1, x2, x3 = x[0], x[1], x[2]\n y1 = np.sin(x1) + np.cos(x2) + x3 - x3\n return y1\n\n def fprime(x):\n \"\"\"Gradient(f)(x):R^3 -> R^3\"\"\"\n x1, x2, x3 = x[0], x[1], x[2]\n grad = np.array([np.cos(x1), -np.sin(x2), x3 - x3])\n return grad\n\n return {\"func\": f, \"func_prime\": fprime}\n\n\[email protected]()\ndef example_function_jacobian_fixtures():\n def f(x):\n \"\"\"f:R^3 -> R^2\"\"\"\n x1, x2, x3 = x[0], x[1], x[2]\n y1, y2 = np.sin(x1) + np.cos(x2), np.exp(x3)\n return np.array([y1, y2])\n\n def fprime(x):\n \"\"\"Jacobian(f)(x):R^3 -> R^(2x3)\"\"\"\n x1, x2, x3 = x[0], x[1], x[2]\n jac = np.array([[np.cos(x1), -np.sin(x2), 0], [0, 0, np.exp(x3)]])\n return jac\n\n return {\"func\": f, \"func_prime\": fprime}\n\n\ndef test_first_derivative_gradient_richardson(example_function_gradient_fixtures):\n f = example_function_gradient_fixtures[\"func\"]\n fprime = example_function_gradient_fixtures[\"func_prime\"]\n\n true_fprime = fprime(np.ones(3))\n scipy_fprime = approx_derivative(f, np.ones(3))\n our_fprime = first_derivative(f, np.ones(3), n_steps=3, method=\"central\", n_cores=1)\n\n aaae(scipy_fprime, our_fprime[\"derivative\"])\n aaae(true_fprime, our_fprime[\"derivative\"])\n\n\ndef test_first_derivative_jacobian_richardson(example_function_jacobian_fixtures):\n f = example_function_jacobian_fixtures[\"func\"]\n fprime = example_function_jacobian_fixtures[\"func_prime\"]\n\n true_fprime = fprime(np.ones(3))\n scipy_fprime = approx_derivative(f, np.ones(3))\n our_fprime = first_derivative(f, np.ones(3), n_steps=3, method=\"central\", n_cores=1)\n\n aaae(scipy_fprime, our_fprime[\"derivative\"])\n aaae(true_fprime, our_fprime[\"derivative\"])\n\n\ndef test_convert_evaluation_data_to_frame():\n arr = np.arange(4).reshape(2, 2)\n arr2 = arr.reshape(2, 1, 2)\n steps = namedtuple_from_kwargs(pos=arr, neg=-arr)\n evals = namedtuple_from_kwargs(pos=arr2, neg=-arr2)\n expected = [\n [1, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 1],\n [1, 1, 0, 0, 2, 2],\n [1, 1, 1, 0, 3, 3],\n [-1, 0, 0, 0, 0, 0],\n [-1, 0, 1, 0, 1, -1],\n [-1, 1, 0, 0, 2, -2],\n [-1, 1, 1, 0, 3, -3],\n ]\n expected = pd.DataFrame(\n expected, columns=[\"sign\", \"step_number\", \"dim_x\", \"dim_f\", \"step\", \"eval\"]\n )\n got = _convert_evaluation_data_to_frame(steps, evals)\n assert_frame_equal(expected, got.reset_index(), check_dtype=False)\n\n\ndef test__convert_richardson_candidates_to_frame():\n jac = {\n \"forward1\": np.array([[0, 1], [2, 3]]),\n \"forward2\": np.array([[0.5, 1], [2, 3]]),\n }\n err = {\n \"forward1\": np.array([[0, 0], [0, 1]]),\n \"forward2\": np.array([[1, 0], [0, 0]]),\n }\n 
expected = [\n [\"forward\", 1, 0, 0, 0, 0],\n [\"forward\", 1, 1, 0, 1, 0],\n [\"forward\", 1, 0, 1, 2, 0],\n [\"forward\", 1, 1, 1, 3, 1],\n [\"forward\", 2, 0, 0, 0.5, 1],\n [\"forward\", 2, 1, 0, 1, 0],\n [\"forward\", 2, 0, 1, 2, 0],\n [\"forward\", 2, 1, 1, 3, 0],\n ]\n expected = pd.DataFrame(\n expected, columns=[\"method\", \"num_term\", \"dim_x\", \"dim_f\", \"der\", \"err\"]\n )\n expected = expected.set_index([\"method\", \"num_term\", \"dim_x\", \"dim_f\"])\n got = _convert_richardson_candidates_to_frame(jac, err)\n assert_frame_equal(got, expected, check_dtype=False)\n\n\ndef test__select_minimizer_along_axis():\n der = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])\n err = np.array([[[0, 1], [1, 0]], [[1, 0], [0, 1]]])\n expected = (np.array([[0, 5], [6, 3]]), np.array([[0, 0], [0, 0]]))\n got = _select_minimizer_along_axis(der, err)\n aaae(expected, got)\n", "\"\"\"Implement `nlopt` algorithms.\n\nThe documentation is heavily based on (nlopt documentation)[nlopt.readthedocs.io].\n\n\"\"\"\nimport numpy as np\n\nfrom estimagic.config import IS_NLOPT_INSTALLED\nfrom estimagic.optimization.algo_options import CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE\nfrom estimagic.optimization.algo_options import CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE\nfrom estimagic.optimization.algo_options import CONVERGENCE_RELATIVE_CRITERION_TOLERANCE\nfrom estimagic.optimization.algo_options import CONVERGENCE_RELATIVE_PARAMS_TOLERANCE\nfrom estimagic.optimization.algo_options import STOPPING_MAX_CRITERION_EVALUATIONS\nfrom estimagic.optimization.algo_options import (\n STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,\n)\n\n\nif IS_NLOPT_INSTALLED:\n import nlopt\n\n\nDEFAULT_ALGO_INFO = {\n \"primary_criterion_entry\": \"value\",\n \"parallelizes\": False,\n \"needs_scaling\": False,\n}\n\n\ndef nlopt_bobyqa(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n\n \"\"\"Minimize a scalar function using the BOBYQA algorithm.\n\n The implementation is derived from the BOBYQA subroutine of M. J. D. Powell.\n\n The algorithm performs derivative free bound-constrained optimization using\n an iteratively constructed quadratic approximation for the objective function.\n Due to its use of quadratic appoximation, the algorithm may perform poorly\n for objective functions that are not twice-differentiable.\n\n For details see:\n M. J. D. 
Powell, \"The BOBYQA algorithm for bound constrained optimization\n without derivatives,\" Department of Applied Mathematics and Theoretical\n Physics, Cambridge England, technical report NA2009/06 (2009).\n\n ``nlopt_bobyqa`` supports the following ``algo_options``:\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n \"\"\"\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.LN_BOBYQA,\n algorithm_name=\"nlopt_bobyqa\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_neldermead(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=0,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n \"\"\"Minimize a scalar function using the Nelder-Mead simplex algorithm.\n\n The basic algorithm is described in:\n J. A. Nelder and R. Mead, \"A simplex method for function minimization,\"\n The Computer Journal 7, p. 308-313 (1965).\n\n The difference between the nlopt implementation an the original implementation is\n that the nlopt version supports bounds. 
This is done by moving all new points that\n would lie outside the bounds exactly on the bounds.\n\n ``nlopt_neldermead`` takes the following ``algo_options``\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n Returns:\n dict: See :ref:`internal_optimizer_output` for details.\n\n \"\"\"\n\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.LN_NELDERMEAD,\n algorithm_name=\"nlopt_neldermead\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_praxis(\n criterion_and_derivative,\n x,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n \"\"\"Minimize a scalar function using principal-axis method.\n\n This is a gradient-free local optimizer originally described in:\n Richard Brent, Algorithms for Minimization without Derivatives\n (Prentice-Hall, 1972). (Reprinted by Dover, 2002.). It assumes quadratic\n form of the optimized function and repeatedly updates a set of conjugate\n search directions.\n\n The algorithm is not invariant to scaling of the objective function and may\n fail under its certain rank-preserving transformations (e.g., will lead to\n a non-quadratic shape of the objective function).\n\n The algorithm is not determenistic and it is not possible to achieve\n detereminancy via seed setting.\n\n The algorithm failed on a simple benchmark function with finite parameter bounds.\n Passing arguments `lower_bounds` and `upper_bounds` has been disabled for this\n algorithm.\n\n The difference between the nlopt implementation an the original implementation is\n that the nlopt version supports bounds. This is done by returning infinity (Inf)\n when the constraints are violated. 
The implementation of bound constraints\n is achieved at the const of significantly reduced speed of convergence.\n In case of bounded constraints, this method is dominated by `nlopt_bobyqa`\n and `nlopt_cobyla`.\n\n ``nlopt_praxis`` takes the following ``algo_options``\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n\n Returns:\n dict: See :ref:`internal_optimizer_output` for details.\n\n \"\"\"\n algo_info = DEFAULT_ALGO_INFO.copy()\n algo_info.update({\"needs_scaling\": True})\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds=None,\n upper_bounds=None,\n algorithm=nlopt.LN_PRAXIS,\n algorithm_name=\"nlopt_praxis\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n algo_info=algo_info,\n )\n\n return out\n\n\ndef nlopt_cobyla(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n \"\"\"Minimize a scalar function using the cobyla method.\n\n The alggorithm is derived from Powell's Constrained Optimization BY Linear\n Approximations (COBYLA) algorithm. It is a derivative-free optimizer with\n nonlinear inequality and equality constrains, described in:\n\n M. J. D. Powell, \"A direct search optimization method that models the\n objective and constraint functions by linear interpolation,\" in Advances in\n Optimization and Numerical Analysis, eds. S. Gomez and J.-P. Hennart (Kluwer\n Academic: Dordrecht, 1994), p. 
51-67\n\n It constructs successive linear approximations of the objective function and\n constraints via a simplex of n+1 points (in n dimensions), and optimizes these\n approximations in a trust region at each step.\n\n The the nlopt implementation differs from the original implementation in a\n a few ways:\n - Incorporates all of the NLopt termination criteria.\n - Adds explicit support for bound constraints.\n - Allows the algorithm to increase the trust-reion radius if the predicted\n imptoovement was approximately right and the simplex is satisfactory.\n - Pseudo-randomizes simplex steps in the algorithm, aimproving robustness by\n avoiding accidentally taking steps that don't improve conditioning, preserving\n the deterministic nature of the algorithm.\n - Supports unequal initial-step sizes in the different parameters.\n\n\n ``nlopt_cobyla`` takes the following ``algo_options``\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n Returns:\n dict: See :ref:`internal_optimizer_output` for details.\n\n \"\"\"\n\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.LN_COBYLA,\n algorithm_name=\"nlopt_cobyla\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_sbplx(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n \"\"\"Minimize a scalar function using the \"Subplex\" algorithm.\n\n The alggorithm is a reimplementation of Tom Rowan's \"Subplex\" algorithm.\n See: T. Rowan, \"Functional Stability Analysis of Numerical Algorithms\",\n Ph.D. thesis, Department of Computer Sciences, University of Texas at\n Austin, 1990.\n\n Subplex is a variant of Nedler-Mead that uses Nedler-Mead on a sequence of\n subspaces. 
It is climed to be more efficient and robust than the original\n Nedler-Mead algorithm.\n\n The difference between this re-implementation and the original algorithm\n of Rowan, is that it explicitly supports bound constraints providing big\n improvement in the case where the optimum lies against one of the constraints.\n\n ``nlopt_sbplx`` takes the following ``algo_options``\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n Returns:\n dict: See :ref:`internal_optimizer_output` for details.\n\n \"\"\"\n\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.LN_SBPLX,\n algorithm_name=\"nlopt_sbplx\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_newuoa(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n \"\"\"Minimize a scalar function using the NEWUOA algorithm.\n\n The algorithm is derived from the NEWUOA subroutine of M.J.D Powell which\n uses iteratively constructed quadratic approximation of the objctive fucntion\n to perform derivative-free unconstrained optimization. Fore more details see:\n M. J. D. Powell, \"The NEWUOA software for unconstrained optimization without\n derivatives,\" Proc. 40th Workshop on Large Scale Nonlinear Optimization\n (Erice, Italy, 2004).\n\n The algorithm in `nlopt` has been modified to support bound constraints. If all\n of the bound constraints are infinite, this function calls the `nlopt.LN_NEWUOA`\n optimizers for uncsonstrained optimization. Otherwise, the `nlopt.LN_NEWUOA_BOUND`\n optimizer for constrained problems.\n\n `NEWUOA` requires the dimension n of the parameter space to be `≥ 2`, i.e. 
the\n implementation does not handle one-dimensional optimization problems.\n\n ``nlopt_newuoa`` takes the following algo_options\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n Returns:\n dict: See :ref:`internal_optimizer_output` for details.\n\n \"\"\"\n if np.any(np.isfinite(lower_bounds)) or np.any(np.isfinite(upper_bounds)):\n algo = nlopt.LN_NEWUOA_BOUND\n else:\n algo = nlopt.LN_NEWUOA\n\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=algo,\n algorithm_name=\"nlopt_newuoa\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_tnewton(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n \"\"\"Minimize a scalar function using the \"TNEWTON\" algorithm.\n\n The alggorithm is based on a Fortran implementation of a preconditioned\n inexact truncated Newton algorithm written by Prof. Ladislav Luksan.\n\n Truncated Newton methods are a set of algorithms designed to solve large scale\n optimization problems. The algorithms use (inaccurate) approximations of the\n solutions to Newton equations, using conjugate gradient methodds, to handle the\n expensive calculations of derivatives during each iteration.\n\n Detailed description of algorithms is given in: R. S. Dembo and T. Steihaug,\n \"Truncated Newton algorithms for large-scale optimization,\" Math. Programming\n 26, p. 
190-212 (1983), http://doi.org/10.1007/BF02592055.\n\n ``nlopt_tnewton`` takes the following ``algo_options``\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n Returns:\n dict: See :ref:`internal_optimizer_output` for details.\n\n \"\"\"\n\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.LD_TNEWTON,\n algorithm_name=\"nlopt_tnewton\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_lbfgs(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n \"\"\"Minimize a scalar function using the \"LBFGS\" algorithm.\n\n The alggorithm is based on a Fortran implementation of low storage BFGS algorithm\n written by Prof. Ladislav Luksan.\n\n LFBGS is an approximation of the original Broyden–Fletcher–Goldfarb–Shanno algorithm\n based on limited use of memory. Memory efficiency is obtained by preserving a limi-\n ted number (<10) of past updates of candidate points and gradient values and using\n them to approximate the hessian matrix.\n\n Detailed description of algorithms is given in:\n J. Nocedal, \"Updating quasi-Newton matrices with limited storage,\" Math. Comput.\n 35, 773-782 (1980).\n D. C. Liu and J. Nocedal, \"On the limited memory BFGS method for large scale\n optimization,\" ''Math. Programming' 45, p. 
503-528 (1989).\n\n ``nlopt_lbfgs`` takes the following ``algo_options``\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n\n Returns:\n dict: See :ref:`internal_optimizer_output` for details.\n\n \"\"\"\n\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.LD_TNEWTON,\n algorithm_name=\"nlopt_tnewton\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_ccsaq(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n\n \"\"\"Minimize a scalar function using CCSAQ algorithm.\n\n CCSAQ uses the quadratic variant of the conservative convex separable approximation.\n The algorithm performs gradient based local optimization with equality (but not\n inequality) constraints. At each candidate point x, a quadratic approximation\n to the criterion faunction is computed using the value of gradient at point x. A\n penalty term is incorporated to render optimizaion convex and conservative. The\n algorithm is \"globally convergent\" in the sense that it is guaranteed to con-\n verge to a local optimum from any feasible starting point.\n\n The implementation is based on CCSA algorithm described in:\n Krister Svanberg, \"A class of globally convergent optimization methods based\n on conservative convex separable approximations,\" SIAM J. Optim. 
12 (2), p.\n 555-573 (2002)\n\n\n\n ``nlopt_ccsaq`` supports the following ``algo_options``:\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n \"\"\"\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.LD_CCSAQ,\n algorithm_name=\"nlopt_ccsaq\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_mma(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n\n \"\"\"Minimize a scalar function using the method of moving asymptotes (MMA).\n\n The implementation is based on an algorithm described in:\n Krister Svanberg, \"A class of globally convergent optimization methods based\n on conservative convex separable approximations,\" SIAM J. Optim. 12 (2), p.\n 555-573 (2002)\n\n The algorithm performs gradient based local optimization with equality (but\n not inequality) constraints. At each candidate point x, an approximation to the\n criterion faunction is computed using the value of gradient at point x. A quadratic\n penalty term is incorporated to render optimizaion convex and conservative. 
The\n algorithm is \"globally convergent\" in the sense that it is guaranteed to con-\n verge to a local optimum from any feasible starting point.\n\n\n\n ``nlopt_mma`` supports the following ``algo_options``:\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n \"\"\"\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.LD_MMA,\n algorithm_name=\"nlopt_mma\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_var(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n rank_1_update=True,\n):\n\n \"\"\"Minimize a scalar function limited memory switching variable-metric method.\n\n The algorithm relies on saving only limited number M of past updates of the\n gradient to approximate the inverse hessian. The large is M, the more memory is\n consumed\n\n Detailed explanation of the algorithm, including its two variations of rank-2 and\n rank-1 methods can be found in the following paper:\n J. Vlcek and L. Luksan, \"Shifted limited-memory variable metric methods for\n large-scale unconstrained minimization,\" J. Computational Appl. Math. 186,\n p. 
365-390 (2006).\n\n ``nlopt_svmm`` supports the following ``algo_options``:\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n - rank_1_update (bool): Whether I rank-1 or rank-2 update is used.\n\n \"\"\"\n if rank_1_update:\n algo = nlopt.LD_VAR1\n else:\n algo = nlopt.LD_VAR2\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=algo,\n algorithm_name=\"nlopt_var\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n\n return out\n\n\ndef nlopt_slsqp(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,\n):\n \"\"\"Optimize a scalar function based on SLSQP method.\n\n SLSQP solves gradient based nonlinearly constrained optimization problems.\n The algorithm treats the optimization problem as a sequence of constrained\n least-squares problems.\n\n The implementation is based on the procedure described in:\n Dieter Kraft, \"A software package for sequential quadratic programming\",\n Technical Report DFVLR-FB 88-28, Institut für Dynamik der Flugsysteme,\n Oberpfaffenhofen, July 1988.\n Dieter Kraft, \"Algorithm 733: TOMP–Fortran modules for optimal control\n calculations,\" ACM Transactions on Mathematical Software, vol. 20, no. 3,\n pp. 
262-281 (1994).\n\n ``nlopt_slsqp`` supports the following ``algo_options``:\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n \"\"\"\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.LD_SLSQP,\n algorithm_name=\"nlopt_slsqp\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n return out\n\n\ndef nlopt_direct(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,\n locally_biased=False,\n random_search=False,\n unscaled_bounds=False,\n):\n \"\"\"Optimize a scalar function based on DIRECT method.\n\n DIRECT is the DIviding RECTangles algorithm for global optimization, described in:\n D. R. Jones, C. D. Perttunen, and B. E. Stuckmann, \"Lipschitzian optimization\n without the lipschitz constant,\" J. Optimization Theory and Applications, vol.\n 79, p. 157 (1993).\n\n Variations of the algorithm include locally biased routines (distinguished by _L\n suffix) that prove to be more efficients for functions that have few local minima.\n See the following for the DIRECT_L variant:\n\n J. M. Gablonsky and C. T. Kelley, \"A locally-biased form of the DIRECT algorithm,\"\n J. Global Optimization, vol. 21 (1), p. 27-37 (2001).\n\n Locally biased algorithms can be implmented both with deterministic and random\n (distinguished by _RAND suffix) search algorithm.\n\n Finally, both original and locally biased variants can be implemented with and\n without the rescaling of the bound constraints.\n\n Boolean arguments `locally_biased`, 'random_search', and 'unscaled_bouds' can be\n set to `True` or `False` to determine which method is run. 
The comprehensive list\n of available methods are:\n - DIRECT\n - DIRECT_L\n - DIRECT_L_NOSCAL\n - DIRECT_L_RAND\n - DIRECT_L_RAND_NOSCAL\n - DIRECT_RAND\n\n ``nlopt_direct`` supports the following ``algo_options``:\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n - locally_biased (bool): Whether the \"L\" version of the algorithm is selected.\n - random_search (bool): Whether the randomized version of the algorithm is selected.\n - unscaled_bounds (bool): Whether the \"NOSCAL\" version of the algorithm is selected.\n\n \"\"\"\n if not locally_biased and not random_search and not unscaled_bounds:\n algo = nlopt.GN_DIRECT\n elif locally_biased and not random_search and not unscaled_bounds:\n algo = nlopt.GN_DIRECT_L\n elif locally_biased and not random_search and unscaled_bounds:\n algo = nlopt.GN_DIRECT_L_NOSCAL\n elif locally_biased and random_search and not unscaled_bounds:\n algo = nlopt.GN_DIRECT_L_RAND\n elif locally_biased and random_search and unscaled_bounds:\n algo = nlopt.GN_DIRECT_L_RAND_NOSCAL\n elif not locally_biased and not random_search and unscaled_bounds:\n algo = nlopt.GN_DIRECT_NOSCAL\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=algo,\n algorithm_name=\"nlopt_direct\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n return out\n\n\ndef nlopt_esch(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,\n):\n \"\"\"Optimize a scalar function using the ESCH algorithm.\n\n ESCH is an evolutionary algorithm that supports bound constraints only. Specifi\n cally, it does not support nonlinear constraints.\n\n More information on this method can be found in:\n C. H. da Silva Santos, M. S. Goncalves, and H. E. Hernandez-Figueroa, \"Designing\n Novel Photonic Devices by Bio-Inspired Computing,\" IEEE Photonics Technology\n Letters 22 (15), pp. 1177–1179 (2010).\n C. H. da Silva Santos, \"Parallel and Bio-Inspired Computing Applied to Analyze\n Microwave and Photonic Metamaterial Strucutures,\" Ph.D. thesis, University of\n Campinas, (2010).\n H.-G. Beyer and H.-P. Schwefel, \"Evolution Strategies: A Comprehensive Introduction,\n \"Journal Natural Computing, 1 (1), pp. 
3–52 (2002).\n Ingo Rechenberg, \"Evolutionsstrategie – Optimierung technischer Systeme nach\n Prinzipien der biologischen Evolution,\" Ph.D. thesis (1971), Reprinted by\n Fromman-Holzboog (1973).\n\n ``nlopt_esch`` supports the following ``algo_options``:\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this\n as convergence.\n\n \"\"\"\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.GN_ESCH,\n algorithm_name=\"nlopt_esch\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n return out\n\n\ndef nlopt_isres(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,\n):\n \"\"\"Optimize a scalar function using the ISRES algorithm.\n\n ISRES is an implementation of \"Improved Stochastic Evolution Strategy\"\n written for solving optimization problems with non-linear constraints. The\n algorithm is supposed to be a global method, in that it has heuristics to\n avoid local minima. However, no convergence proof is available.\n\n The original method and a refined version can be found, respecively, in:\n Thomas Philip Runarsson and Xin Yao, \"Search biases in constrained\n evolutionary optimization,\" IEEE Trans. on Systems, Man, and Cybernetics\n Part C: Applications and Reviews, vol. 35 (no. 2), pp. 233-243 (2005).\n Thomas P. Runarsson and Xin Yao, \"Stochastic ranking for constrained\n evolutionary optimization,\" IEEE Trans. Evolutionary Computation, vol. 4\n (no. 3), pp. 
284-294 (2000).\n\n\n ``nlopt_isres`` supports the following ``algo_options``:\n\n - convergence.relative_params_tolerance (float): Stop when the relative\n movement between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute\n movement between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of\n the criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of\n function evaluation is reached, the optimization stops but we do not count\n this as convergence.\n\n \"\"\"\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.GN_ISRES,\n algorithm_name=\"nlopt_isres\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n )\n return out\n\n\ndef nlopt_crs2_lm(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n *,\n convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,\n convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,\n convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,\n convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,\n stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,\n population_size=None,\n):\n \"\"\"Optimize a scalar function using the CRS2_LM algorithm.\n\n This implementation of controlled random search method with local mutation is based\n on: P. Kaelo and M. M. Ali, \"Some variants of the controlled random search algorithm\n for global optimization,\" J. Optim. Theory Appl. 130 (2), 253-264 (2006).\n\n The original CRS method is described in: W. L. Price, \"A controlled random search\n procedure for global optimization,\" in Towards Global Optimization 2, p. 71-84\n edited by L. C. W. Dixon and G. P. Szego (North-Holland Press, Amsterdam, 1978). W.\n L. Price, \"Global optimization by controlled random search,\" J. Optim. Theory Appl.\n 40 (3), p. 333-348 (1983).\n\n CRS class of algorithms starts with random population of points and evolves the\n points \"randomly\". The size of the initial population can be set via the param-\n meter population_size. 
If the user doesn't specify a value, it is set to the nlopt\n default of 10*(n+1).\n\n ``nlopt_isres`` supports the following ``algo_options``:\n\n - convergence.relative_params_tolerance (float): Stop when the relative movement\n between parameter vectors is smaller than this.\n - convergence.absolute_params_tolerance (float): Stop when the absolute movement\n between parameter vectors is smaller than this.\n - convergence.relative_criterion_tolerance (float): Stop when the relative\n improvement between two iterations is smaller than this.\n - convergence.absolute_criterion_tolerance (float): Stop when the change of the\n criterion function between two iterations is smaller than this.\n - stopping.max_criterion_evaluations (int): If the maximum number of function\n evaluation is reached, the optimization stops but we do not count this as\n convergence.\n - population_size (int): Size of the population. If None, it's set to be\n 10 * (number of parameters + 1).\n\n \"\"\"\n if population_size is None:\n population_size = 10 * (len(x) + 1)\n out = _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm=nlopt.GN_CRS2_LM,\n algorithm_name=\"nlopt_crs2_lm\",\n convergence_xtol_rel=convergence_relative_params_tolerance,\n convergence_xtol_abs=convergence_absolute_params_tolerance,\n convergence_ftol_rel=convergence_relative_criterion_tolerance,\n convergence_ftol_abs=convergence_absolute_criterion_tolerance,\n stopping_max_eval=stopping_max_criterion_evaluations,\n population_size=population_size,\n )\n return out\n\n\ndef _minimize_nlopt(\n criterion_and_derivative,\n x,\n lower_bounds,\n upper_bounds,\n algorithm,\n algorithm_name,\n *,\n convergence_xtol_rel=None,\n convergence_xtol_abs=None,\n convergence_ftol_rel=None,\n convergence_ftol_abs=None,\n stopping_max_eval=None,\n population_size=None,\n algo_info=None,\n):\n \"\"\"Run actual nlopt optimization argument, set relevant attributes.\"\"\"\n if algo_info is None:\n algo_info = DEFAULT_ALGO_INFO.copy()\n else:\n algo_info = algo_info.copy()\n algo_info[\"name\"] = algorithm_name\n\n def func(x, grad):\n if grad.size > 0:\n criterion, derivative = criterion_and_derivative(\n x,\n task=\"criterion_and_derivative\",\n algorithm_info=algo_info,\n )\n grad[:] = derivative\n else:\n criterion = criterion_and_derivative(\n x,\n task=\"criterion\",\n algorithm_info=algo_info,\n )\n return criterion\n\n opt = nlopt.opt(algorithm, x.shape[0])\n if convergence_ftol_rel is not None:\n opt.set_ftol_rel(convergence_ftol_rel)\n if convergence_ftol_abs is not None:\n opt.set_ftol_abs(convergence_ftol_abs)\n if convergence_xtol_rel is not None:\n opt.set_xtol_rel(convergence_xtol_rel)\n if convergence_xtol_abs is not None:\n opt.set_xtol_abs(convergence_xtol_abs)\n if lower_bounds is not None:\n opt.set_lower_bounds(lower_bounds)\n if upper_bounds is not None:\n opt.set_upper_bounds(upper_bounds)\n if stopping_max_eval is not None:\n opt.set_maxeval(stopping_max_eval)\n if population_size is not None:\n opt.set_population(population_size)\n opt.set_min_objective(func)\n solution_x = opt.optimize(x)\n return _process_nlopt_results(opt, solution_x)\n\n\ndef _process_nlopt_results(nlopt_obj, solution_x):\n messages = {\n 1: \"Convergence achieved \",\n 2: (\n \"Optimizer stopped because maximum value of criterion function was reached\"\n ),\n 3: (\n \"Optimizer stopped because convergence_relative_criterion_tolerance or \"\n + \"convergence_absolute_criterion_tolerance was reached\"\n ),\n 4: (\n \"Optimizer stopped 
because convergence_relative_params_tolerance or \"\n + \"convergence_absolute_params_tolerance was reached\"\n ),\n 5: \"Optimizer stopped because max_criterion_evaluations was reached\",\n 6: \"Optimizer stopped because max running time was reached\",\n -1: \"Optimizer failed\",\n -2: \"Invalid arguments were passed\",\n -3: \"Memory error\",\n -4: \"Halted because roundoff errors limited progress\",\n -5: \"Halted because of user specified forced stop\",\n }\n processed = {\n \"solution_x\": solution_x,\n \"solution_criterion\": nlopt_obj.last_optimum_value(),\n \"solution_derivative\": None,\n \"solution_hessian\": None,\n \"n_criterion_evaluations\": nlopt_obj.get_numevals(),\n \"n_derivative_evaluations\": None,\n \"n_iterations\": None,\n \"success\": nlopt_obj.last_optimize_result() in [1, 2, 3, 4],\n \"message\": messages[nlopt_obj.last_optimize_result()],\n \"reached_convergence_criterion\": None,\n }\n return processed\n", "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom estimagic.config import IS_CYIPOPT_INSTALLED\nfrom estimagic.config import IS_DFOLS_INSTALLED\nfrom estimagic.config import IS_PETSC4PY_INSTALLED\nfrom estimagic.config import IS_PYBOBYQA_INSTALLED\nfrom estimagic.config import IS_PYGMO_INSTALLED\nfrom estimagic.optimization import AVAILABLE_ALGORITHMS\nfrom estimagic.utilities import calculate_trustregion_initial_radius\nfrom estimagic.utilities import chol_params_to_lower_triangular_matrix\nfrom estimagic.utilities import cov_matrix_to_params\nfrom estimagic.utilities import cov_matrix_to_sdcorr_params\nfrom estimagic.utilities import cov_params_to_matrix\nfrom estimagic.utilities import cov_to_sds_and_corr\nfrom estimagic.utilities import dimension_to_number_of_triangular_elements\nfrom estimagic.utilities import hash_array\nfrom estimagic.utilities import number_of_triangular_elements_to_dimension\nfrom estimagic.utilities import robust_cholesky\nfrom estimagic.utilities import robust_inverse\nfrom estimagic.utilities import sdcorr_params_to_matrix\nfrom estimagic.utilities import sdcorr_params_to_sds_and_corr\nfrom estimagic.utilities import sds_and_corr_to_cov\n\n\ndef test_chol_params_to_lower_triangular_matrix():\n calculated = chol_params_to_lower_triangular_matrix(pd.Series([1, 2, 3]))\n expected = np.array([[1, 0], [2, 3]])\n aaae(calculated, expected)\n\n\ndef test_cov_params_to_matrix():\n params = np.array([1, 0.1, 2, 0.2, 0.22, 3])\n expected = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])\n calculated = cov_params_to_matrix(params)\n aaae(calculated, expected)\n\n\ndef test_cov_matrix_to_params():\n expected = np.array([1, 0.1, 2, 0.2, 0.22, 3])\n cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])\n calculated = cov_matrix_to_params(cov)\n aaae(calculated, expected)\n\n\ndef test_sdcorr_params_to_sds_and_corr():\n sdcorr_params = pd.Series([1, 2, 3, 0.1, 0.2, 0.3])\n exp_corr = np.array([[1, 0.1, 0.2], [0.1, 1, 0.3], [0.2, 0.3, 1]])\n exp_sds = np.array([1, 2, 3])\n calc_sds, calc_corr = sdcorr_params_to_sds_and_corr(sdcorr_params)\n aaae(calc_sds, exp_sds)\n aaae(calc_corr, exp_corr)\n\n\ndef test_sdcorr_params_to_matrix():\n sds = np.sqrt([1, 2, 3])\n corrs = [0.07071068, 0.11547005, 0.08981462]\n params = np.hstack([sds, corrs])\n expected = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])\n calculated = sdcorr_params_to_matrix(params)\n aaae(calculated, expected)\n\n\ndef test_cov_matrix_to_sdcorr_params():\n sds = np.sqrt([1, 2, 
3])\n corrs = [0.07071068, 0.11547005, 0.08981462]\n expected = np.hstack([sds, corrs])\n cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])\n calculated = cov_matrix_to_sdcorr_params(cov)\n aaae(calculated, expected)\n\n\ndef test_sds_and_corr_to_cov():\n sds = [1, 2, 3]\n corr = np.ones((3, 3)) * 0.2\n corr[np.diag_indices(3)] = 1\n calculated = sds_and_corr_to_cov(sds, corr)\n expected = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]])\n aaae(calculated, expected)\n\n\ndef test_cov_to_sds_and_corr():\n cov = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]])\n calc_sds, calc_corr = cov_to_sds_and_corr(cov)\n exp_sds = [1, 2, 3]\n exp_corr = np.ones((3, 3)) * 0.2\n exp_corr[np.diag_indices(3)] = 1\n aaae(calc_sds, exp_sds)\n aaae(calc_corr, exp_corr)\n\n\ndef test_number_of_triangular_elements_to_dimension():\n inputs = [6, 10, 15, 21]\n expected = [3, 4, 5, 6]\n for inp, exp in zip(inputs, expected):\n assert number_of_triangular_elements_to_dimension(inp) == exp\n\n\ndef test_dimension_to_number_of_triangular_elements():\n inputs = [3, 4, 5, 6]\n expected = [6, 10, 15, 21]\n for inp, exp in zip(inputs, expected):\n assert dimension_to_number_of_triangular_elements(inp) == exp\n\n\ndef random_cov(dim, seed):\n np.random.seed(seed)\n\n num_elements = int(dim * (dim + 1) / 2)\n chol = np.zeros((dim, dim))\n chol[np.tril_indices(dim)] = np.random.uniform(size=num_elements)\n cov = chol @ chol.T\n zero_positions = np.random.choice(range(dim), size=int(dim / 5), replace=False)\n for pos in zero_positions:\n cov[:, pos] = 0\n cov[pos] = 0\n return cov\n\n\nseeds = [58822, 3181, 98855, 44002, 47631, 97741, 10655, 4600, 1151, 58189]\ndims = [8] * 6 + [10, 12, 15, 20]\n\n\[email protected](\"dim, seed\", zip(dims, seeds))\ndef test_robust_cholesky_with_zero_variance(dim, seed):\n cov = random_cov(dim, seed)\n chol = robust_cholesky(cov)\n aaae(chol.dot(chol.T), cov)\n assert (chol[np.triu_indices(len(cov), k=1)] == 0).all()\n\n\ndef test_robust_cholesky_with_extreme_cases():\n for cov in [np.ones((5, 5)), np.zeros((5, 5))]:\n chol = robust_cholesky(cov)\n aaae(chol.dot(chol.T), cov)\n\n\ndef test_robust_inverse_nonsingular():\n mat = np.eye(3) + 0.2\n expected = np.linalg.inv(mat)\n calculated = robust_inverse(mat)\n aaae(calculated, expected)\n\n\ndef test_robust_inverse_singular():\n mat = np.zeros((5, 5))\n expected = np.zeros((5, 5))\n with pytest.warns(UserWarning, match=\"LinAlgError\"):\n calculated = robust_inverse(mat)\n aaae(calculated, expected)\n\n\ndef test_hash_array():\n arr1 = np.arange(4)[::2]\n arr2 = np.array([0, 2])\n\n arr3 = np.array([0, 3])\n assert hash_array(arr1) == hash_array(arr2)\n assert hash_array(arr1) != hash_array(arr3)\n\n\ndef test_initial_trust_radius_small_x():\n x = np.array([0.01, 0.01])\n expected = 0.1\n res = calculate_trustregion_initial_radius(x)\n assert expected == pytest.approx(res, abs=1e-8)\n\n\ndef test_initial_trust_radius_large_x():\n x = np.array([20.5, 10])\n expected = 2.05\n res = calculate_trustregion_initial_radius(x)\n assert expected == pytest.approx(res, abs=1e-8)\n\n\ndef test_available_algorithms():\n present_algo_names = AVAILABLE_ALGORITHMS.keys()\n assert \"scipy_lbfgsb\" in present_algo_names\n assert (\"nag_dfols\" in present_algo_names) is IS_DFOLS_INSTALLED\n assert (\"tao_pounders\" in present_algo_names) is IS_PETSC4PY_INSTALLED\n assert (\"nag_pybobyqa\" in present_algo_names) is IS_PYBOBYQA_INSTALLED\n assert (\"pygmo_gaco\" in present_algo_names) is IS_PYGMO_INSTALLED\n assert 
(\"ipopt\" in present_algo_names) is IS_CYIPOPT_INSTALLED\n assert \"get_scipy_bounds\" not in present_algo_names\n", "\"\"\"Visualize and compare derivative estimates.\"\"\"\nimport itertools\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef derivative_plot(\n derivative_result,\n):\n \"\"\"Plot evaluations and derivative estimates.\n\n The resulting grid plot displays function evaluations and derivatives. The\n derivatives are visualized as a first-order Taylor approximation. Bands are drawn\n indicating the area in which forward and backward derivatives are located. This is\n done by filling the area between the derivative estimate with lowest and highest\n step size, respectively. Do not confuse these bands with statistical errors.\n\n This function does not require the params vector as plots are displayed relative to\n the point at which the derivative is calculated.\n\n Args:\n derivative_result (dict): The result dictionary of call to\n :func:`~estimagic.differentiation.derivatives.first_derivative` with\n return_info and return_func_value set to True.\n\n Returns:\n fig (matplotlib.pyplot.figure): The figure.\n\n \"\"\"\n func_value = derivative_result[\"func_value\"]\n func_evals = derivative_result[\"func_evals\"]\n derivative_candidates = derivative_result[\"derivative_candidates\"]\n\n # remove index from main data for plotting\n df = func_evals.reset_index()\n df = df.assign(**{\"step\": df.step * df.sign})\n func_evals = df.set_index([\"sign\", \"step_number\", \"dim_x\", \"dim_f\"])\n\n # prepare derivative data\n df_der = _select_derivative_with_minimal_error(derivative_candidates)\n df_der_method = _select_derivative_with_minimal_error(\n derivative_candidates, given_method=True\n )\n\n # auxiliary\n grid_points = 2 # we do not need more than 2 grid points since all lines are affine\n func_value = np.atleast_1d(func_value)\n max_steps = df.groupby(\"dim_x\")[\"step\"].max()\n palette = {\n \"forward\": \"tab:green\",\n \"central\": \"tab:blue\",\n \"backward\": \"tab:orange\",\n 1: \"green\",\n -1: \"orange\",\n }\n\n # dimensions of problem. 
dimensions of params vector span the vertical axis while\n # dimensions of output span the horizontal axis of produced figure\n dim_x = range(df[\"dim_x\"].max() + 1)\n dim_f = range(df[\"dim_f\"].max() + 1)\n\n # plot\n width = 10 * len(dim_f)\n height = 11 * len(dim_x)\n\n fig, axes = plt.subplots(len(dim_x), len(dim_f), figsize=(width, height))\n axes = np.atleast_2d(axes)\n\n for ax, (row, col) in zip(axes.flatten(), itertools.product(dim_x, dim_f)):\n # labels and texts\n ax.set_xlabel(fr\"Value relative to $x_{{0, {row}}}$\", fontsize=14)\n ax.text(\n 0.35,\n 1.02,\n f\"dim_x, dim_f = {row, col}\",\n transform=ax.transAxes,\n color=\"grey\",\n fontsize=14,\n )\n\n # initial values and x grid\n y0 = func_value[col]\n x_grid = np.linspace(-max_steps[row], max_steps[row], grid_points)\n\n # plot function evaluations scatter points\n _scatter_data = func_evals.query(\"dim_x == @row & dim_f == @col\")\n ax.scatter(\n _scatter_data[\"step\"],\n _scatter_data[\"eval\"],\n color=\"gray\",\n label=\"Function Evaluation\",\n edgecolor=\"black\",\n )\n\n # draw overall best derivative estimate\n _y = y0 + x_grid * df_der.loc[row, col]\n ax.plot(\n x_grid,\n _y,\n color=\"black\",\n label=\"Best Estimate\",\n zorder=2,\n linewidth=1.5,\n linestyle=\"dashdot\",\n )\n\n # draw best derivative estimate given each method\n for method in [\"forward\", \"central\", \"backward\"]:\n _y = y0 + x_grid * df_der_method.loc[method, row, col]\n ax.plot(\n x_grid, _y, color=palette[method], label=method, zorder=1, linewidth=2\n )\n\n # fill area\n for sign in [1, -1]:\n _x_y = _select_eval_with_lowest_and_highest_step(func_evals, sign, row, col)\n diff = _x_y - np.array([0, y0])\n slope = diff[:, 1] / diff[:, 0]\n _y = y0 + x_grid * slope.reshape(-1, 1)\n ax.plot(x_grid, _y.T, \"--\", color=palette[sign], linewidth=0.5)\n ax.fill_between(x_grid, _y[0, :], _y[1, :], alpha=0.15, color=palette[sign])\n\n # legend\n ncol = 5 if len(dim_f) > 1 else 3\n axes[0, 0].legend(\n loc=\"upper center\",\n bbox_to_anchor=(len(dim_f) / 2 + 0.05 * len(dim_f), 1.15),\n ncol=ncol,\n fontsize=14,\n )\n return fig\n\n\ndef _select_derivative_with_minimal_error(df_jac_cand, given_method=False):\n \"\"\"Select derivatives with minimal error component wise.\n\n Args:\n df_jac_cand (pandas.DataFrame): Frame containing jacobian candidates.\n given_method (bool): Boolean indicating wether to condition on columns method\n in df_jac_cand. Default is False, which selects the overall best derivative\n estimate.\n\n Returns:\n df (pandas.DataFrame): The (best) derivative estimate.\n\n \"\"\"\n given = [\"method\"] if given_method else []\n minimizer = df_jac_cand.groupby(given + [\"dim_x\", \"dim_f\"])[\"err\"].idxmin()\n df = df_jac_cand.loc[minimizer][\"der\"]\n index_level_to_drop = list({\"method\", \"num_term\"} - set(given))\n df = df.droplevel(index_level_to_drop).copy()\n return df\n\n\ndef _select_eval_with_lowest_and_highest_step(df_evals, sign, dim_x, dim_f):\n \"\"\"Select step and eval from data with highest and lowest step.\n\n Args:\n df_evals (pd.DataFrame): Frame containing func evaluations (long-format).\n sign (int): Direction of step.\n dim_x (int): Dimension of x to select.\n dim_f (int): Dimension of f to select.\n\n Returns:\n out (numpy.ndarray): Array of shape (2, 2). 
Columns correspond to step and eval,\n while rows correspond to lowest and highest step, respectively.\n\n \"\"\"\n df = df_evals.loc[(sign, slice(None), dim_x, dim_f), [\"step\", \"eval\"]]\n df = df.dropna().sort_index()\n out = df.head(1).append(df.tail(1)).values.copy()\n return out\n" ]
[ [ "pandas.read_pickle", "numpy.isnan", "numpy.arange", "numpy.cos", "pandas.DataFrame", "numpy.ones", "numpy.full", "numpy.sin", "numpy.zeros_like", "pandas.testing.assert_frame_equal", "numpy.exp", "numpy.array", "numpy.testing.assert_array_almost_equal" ], [ "numpy.isfinite" ], [ "numpy.hstack", "numpy.sqrt", "pandas.Series", "numpy.random.seed", "numpy.linalg.inv", "numpy.arange", "numpy.eye", "numpy.tril_indices", "numpy.ones", "numpy.diag_indices", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.testing.assert_array_almost_equal" ], [ "numpy.atleast_1d", "numpy.atleast_2d", "numpy.array", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
weiwei1115/PaddleNLP
[ "dd98f7f8b25b41d39228ba8a958b11a6212709a3" ]
[ "examples/language_model/bert/run_glue.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport random\nimport time\nimport math\nimport distutils.util\nfrom functools import partial\n\nimport numpy as np\nimport paddle\nfrom paddle.io import DataLoader\nfrom paddle.metric import Metric, Accuracy, Precision, Recall\n\nfrom paddlenlp.datasets import GlueCoLA, GlueSST2, GlueMRPC, GlueSTSB, GlueQQP, GlueMNLI, GlueQNLI, GlueRTE\nfrom paddlenlp.data import Stack, Tuple, Pad\nfrom paddlenlp.transformers import BertForSequenceClassification, BertTokenizer\nfrom paddlenlp.transformers import ElectraForSequenceClassification, ElectraTokenizer\nfrom paddlenlp.transformers import ErnieForSequenceClassification, ErnieTokenizer\nfrom paddlenlp.transformers import LinearDecayWithWarmup\nfrom paddlenlp.metrics import AccuracyAndF1, Mcc, PearsonAndSpearman\n\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\nTASK_CLASSES = {\n \"cola\": (GlueCoLA, Mcc),\n \"sst-2\": (GlueSST2, Accuracy),\n \"mrpc\": (GlueMRPC, AccuracyAndF1),\n \"sts-b\": (GlueSTSB, PearsonAndSpearman),\n \"qqp\": (GlueQQP, AccuracyAndF1),\n \"mnli\": (GlueMNLI, Accuracy),\n \"qnli\": (GlueQNLI, Accuracy),\n \"rte\": (GlueRTE, Accuracy),\n}\n\nMODEL_CLASSES = {\n \"bert\": (BertForSequenceClassification, BertTokenizer),\n \"ernie\": (ErnieForSequenceClassification, ErnieTokenizer)\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train selected in the list: \" +\n \", \".join(TASK_CLASSES.keys()), )\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" +\n \", \".join(MODEL_CLASSES.keys()), )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \"\n + \", \".join(\n sum([\n list(classes[-1].pretrained_init_configuration.keys())\n for classes in MODEL_CLASSES.values()\n ], [])), )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\", )\n parser.add_argument(\n \"--learning_rate\",\n default=1e-4,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\n \"--num_train_epochs\",\n default=3,\n type=int,\n help=\"Total number of training epochs to perform.\", )\n parser.add_argument(\n \"--logging_steps\",\n type=int,\n default=100,\n help=\"Log every X updates steps.\")\n parser.add_argument(\n \"--save_steps\",\n type=int,\n default=100,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--batch_size\",\n default=32,\n type=int,\n help=\"Batch size per GPU/CPU for training.\", )\n parser.add_argument(\n \"--weight_decay\",\n default=0.0,\n type=float,\n help=\"Weight decay if we apply some.\")\n parser.add_argument(\n \"--warmup_steps\",\n default=0,\n type=int,\n help=\"Linear warmup over warmup_steps. If > 0: Override warmup_proportion\"\n )\n parser.add_argument(\n \"--warmup_proportion\",\n default=0.,\n type=float,\n help=\"Linear warmup proportion over total steps.\")\n parser.add_argument(\n \"--adam_epsilon\",\n default=1e-6,\n type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\n \"--seed\", default=42, type=int, help=\"random seed for initialization\")\n parser.add_argument(\n \"--n_cards\",\n default=1,\n type=int,\n help=\"Number cards for the training, only support multi cards in the gpu.\"\n )\n parser.add_argument(\n \"--select_device\",\n type=str,\n default=\"gpu\",\n help=\"Device for selecting for the training.\")\n parser.add_argument(\n \"--use_amp\",\n type=distutils.util.strtobool,\n default=False,\n help=\"Enable mixed precision training.\")\n parser.add_argument(\n \"--scale_loss\",\n type=float,\n default=2**15,\n help=\"The value of scale_loss for fp16.\")\n args = parser.parse_args()\n return args\n\n\ndef set_seed(args):\n # Use the same data seed(for data shuffle) for all procs to guarantee data\n # consistency after sharding.\n random.seed(args.seed)\n np.random.seed(args.seed)\n # Maybe different op seeds(for dropout) for different procs is better. 
By:\n # `paddle.seed(args.seed + paddle.distributed.get_rank())`\n paddle.seed(args.seed)\n\n\ndef evaluate(model, loss_fct, metric, data_loader):\n model.eval()\n metric.reset()\n for batch in data_loader:\n input_ids, segment_ids, labels = batch\n logits = model(input_ids, segment_ids)\n loss = loss_fct(logits, labels)\n correct = metric.compute(logits, labels)\n metric.update(correct)\n res = metric.accumulate()\n if isinstance(metric, AccuracyAndF1):\n logger.info(\n \"eval loss: %f, acc: %s, precision: %s, recall: %s, f1: %s, acc and f1: %s.\"\n % (loss.numpy(), res[0], res[1], res[2], res[3], res[4]))\n elif isinstance(metric, Mcc):\n logger.info(\"eval loss: %f, mcc: %s.\" % (loss.numpy(), res[0]))\n elif isinstance(metric, PearsonAndSpearman):\n logger.info(\n \"eval loss: %f, pearson: %s, spearman: %s, pearson and spearman: %s.\"\n % (loss.numpy(), res[0], res[1], res[2]))\n else:\n logger.info(\"eval loss: %f, acc: %s.\" % (loss.numpy(), res))\n model.train()\n\n\ndef convert_example(example,\n tokenizer,\n label_list,\n max_seq_length=512,\n is_test=False):\n \"\"\"convert a glue example into necessary features\"\"\"\n\n def _truncate_seqs(seqs, max_seq_length):\n if len(seqs) == 1: # single sentence\n # Account for [CLS] and [SEP] with \"- 2\"\n seqs[0] = seqs[0][0:(max_seq_length - 2)]\n else: # Sentence pair\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n tokens_a, tokens_b = seqs\n max_seq_length -= 3\n while True: # Truncate with longest_first strategy\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_seq_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n return seqs\n\n def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):\n concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])\n segment_ids = sum(\n ([i] * (len(seq) + len(sep))\n for i, (sep, seq) in enumerate(zip(separators, seqs))), [])\n if isinstance(seq_mask, int):\n seq_mask = [[seq_mask] * len(seq) for seq in seqs]\n if isinstance(separator_mask, int):\n separator_mask = [[separator_mask] * len(sep) for sep in separators]\n p_mask = sum((s_mask + mask\n for sep, seq, s_mask, mask in zip(\n separators, seqs, seq_mask, separator_mask)), [])\n return concat, segment_ids, p_mask\n\n if not is_test:\n # `label_list == None` is for regression task\n label_dtype = \"int64\" if label_list else \"float32\"\n # Get the label\n label = example[-1]\n example = example[:-1]\n # Create label maps if classification task\n if label_list:\n label_map = {}\n for (i, l) in enumerate(label_list):\n label_map[l] = i\n label = label_map[label]\n label = np.array([label], dtype=label_dtype)\n\n # Tokenize raw text\n tokens_raw = [tokenizer(l) for l in example]\n # Truncate to the truncate_length,\n tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)\n # Concate the sequences with special tokens\n tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]\n tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *\n len(tokens_trun))\n # Convert the token to ids\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n valid_length = len(input_ids)\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n # input_mask = [1] * len(input_ids)\n if not is_test:\n return input_ids, segment_ids, valid_length, label\n else:\n return input_ids, segment_ids, valid_length\n\n\ndef do_train(args):\n paddle.set_device(args.select_device)\n if paddle.distributed.get_world_size() > 1:\n paddle.distributed.init_parallel_env()\n\n set_seed(args)\n\n args.task_name = args.task_name.lower()\n dataset_class, metric_class = TASK_CLASSES[args.task_name]\n args.model_type = args.model_type.lower()\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n\n train_dataset = dataset_class.get_datasets([\"train\"])\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n\n trans_func = partial(\n convert_example,\n tokenizer=tokenizer,\n label_list=train_dataset.get_labels(),\n max_seq_length=args.max_seq_length)\n train_dataset = train_dataset.apply(trans_func, lazy=True)\n train_batch_sampler = paddle.io.DistributedBatchSampler(\n train_dataset, batch_size=args.batch_size, shuffle=True)\n batchify_fn = lambda samples, fn=Tuple(\n Pad(axis=0, pad_val=tokenizer.pad_token_id), # input\n Pad(axis=0, pad_val=tokenizer.pad_token_id), # segment\n Stack(), # length\n Stack(dtype=\"int64\" if train_dataset.get_labels() else \"float32\") # label\n ): [data for i, data in enumerate(fn(samples)) if i != 2]\n train_data_loader = DataLoader(\n dataset=train_dataset,\n batch_sampler=train_batch_sampler,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n if args.task_name == \"mnli\":\n dev_dataset_matched, dev_dataset_mismatched = dataset_class.get_datasets(\n [\"dev_matched\", \"dev_mismatched\"])\n dev_dataset_matched = dev_dataset_matched.apply(trans_func, lazy=True)\n dev_dataset_mismatched = dev_dataset_mismatched.apply(\n trans_func, lazy=True)\n dev_batch_sampler_matched = paddle.io.BatchSampler(\n dev_dataset_matched, batch_size=args.batch_size, shuffle=False)\n dev_data_loader_matched = DataLoader(\n dataset=dev_dataset_matched,\n batch_sampler=dev_batch_sampler_matched,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n dev_batch_sampler_mismatched = paddle.io.BatchSampler(\n dev_dataset_mismatched, batch_size=args.batch_size, shuffle=False)\n dev_data_loader_mismatched = DataLoader(\n dataset=dev_dataset_mismatched,\n batch_sampler=dev_batch_sampler_mismatched,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n else:\n dev_dataset = dataset_class.get_datasets([\"dev\"])\n dev_dataset = dev_dataset.apply(trans_func, lazy=True)\n dev_batch_sampler = paddle.io.BatchSampler(\n dev_dataset, batch_size=args.batch_size, shuffle=False)\n dev_data_loader = DataLoader(\n dataset=dev_dataset,\n batch_sampler=dev_batch_sampler,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n\n num_classes = 1 if train_dataset.get_labels() == None else len(\n train_dataset.get_labels())\n model = model_class.from_pretrained(\n args.model_name_or_path, num_classes=num_classes)\n if paddle.distributed.get_world_size() > 1:\n model = paddle.DataParallel(model)\n\n num_training_steps = args.max_steps if args.max_steps > 0 else (\n len(train_data_loader) * args.num_train_epochs)\n warmup = args.warmup_steps if args.warmup_steps > 0 else args.warmup_proportion\n\n lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,\n warmup)\n\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n beta1=0.9,\n beta2=0.999,\n epsilon=args.adam_epsilon,\n parameters=model.parameters(),\n 
weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ])\n\n loss_fct = paddle.nn.loss.CrossEntropyLoss() if train_dataset.get_labels(\n ) else paddle.nn.loss.MSELoss()\n\n metric = metric_class()\n\n if args.use_amp:\n scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)\n\n global_step = 0\n tic_train = time.time()\n for epoch in range(args.num_train_epochs):\n for step, batch in enumerate(train_data_loader):\n global_step += 1\n input_ids, segment_ids, labels = batch\n with paddle.amp.auto_cast(\n args.use_amp,\n custom_white_list=[\"layer_norm\", \"softmax\", \"gelu\"]):\n logits = model(input_ids, segment_ids)\n loss = loss_fct(logits, labels)\n if args.use_amp:\n scaler.scale(loss).backward()\n scaler.minimize(optimizer, loss)\n else:\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n optimizer.clear_gradients()\n if global_step % args.logging_steps == 0:\n logger.info(\n \"global step %d/%d, epoch: %d, batch: %d, rank_id: %s, loss: %f, lr: %.10f, speed: %.4f step/s\"\n % (global_step, num_training_steps, epoch, step,\n paddle.distributed.get_rank(), loss, optimizer.get_lr(),\n args.logging_steps / (time.time() - tic_train)))\n tic_train = time.time()\n if global_step % args.save_steps == 0:\n tic_eval = time.time()\n if args.task_name == \"mnli\":\n evaluate(model, loss_fct, metric, dev_data_loader_matched)\n evaluate(model, loss_fct, metric,\n dev_data_loader_mismatched)\n logger.info(\"eval done total : %s s\" %\n (time.time() - tic_eval))\n else:\n evaluate(model, loss_fct, metric, dev_data_loader)\n logger.info(\"eval done total : %s s\" %\n (time.time() - tic_eval))\n if (not args.n_cards > 1) or paddle.distributed.get_rank() == 0:\n output_dir = os.path.join(args.output_dir,\n \"%s_ft_model_%d.pdparams\" %\n (args.task_name, global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # Need better way to get inner model of DataParallel\n model_to_save = model._layers if isinstance(\n model, paddle.DataParallel) else model\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n\ndef print_arguments(args):\n \"\"\"print arguments\"\"\"\n print('----------- Configuration Arguments -----------')\n for arg, value in sorted(vars(args).items()):\n print('%s: %s' % (arg, value))\n print('------------------------------------------------')\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n print_arguments(args)\n if args.n_cards > 1 and args.select_device == \"gpu\":\n paddle.distributed.spawn(do_train, args=(args, ), nprocs=args.n_cards)\n else:\n do_train(args)\n" ]
[ [ "numpy.array", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
star10919/drf
[ "77c005794087484d72ffc0d76612a6ac9845821e", "77c005794087484d72ffc0d76612a6ac9845821e", "77c005794087484d72ffc0d76612a6ac9845821e", "77c005794087484d72ffc0d76612a6ac9845821e", "77c005794087484d72ffc0d76612a6ac9845821e" ]
[ "venv/Lib/site-packages/sklearn/linear_model/_base.py", "venv/Lib/site-packages/sklearn/inspection/tests/test_partial_dependence.py", "venv/Lib/site-packages/sklearn/ensemble/_weight_boosting.py", "venv/Lib/site-packages/sklearn/tree/_classes.py", "venv/Lib/site-packages/folium/features.py" ]
[ "\"\"\"\nGeneralized Linear Models.\n\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Olivier Grisel <[email protected]>\n# Vincent Michel <[email protected]>\n# Peter Prettenhofer <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Lars Buitinck\n# Maryan Morel <[email protected]>\n# Giorgio Patrini <[email protected]>\n# License: BSD 3 clause\n\nfrom abc import ABCMeta, abstractmethod\nimport numbers\nimport warnings\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy import linalg\nfrom scipy import optimize\nfrom scipy import sparse\nfrom scipy.special import expit\nfrom joblib import Parallel\n\nfrom ..base import (BaseEstimator, ClassifierMixin, RegressorMixin,\n MultiOutputMixin)\nfrom ..utils import check_array\nfrom ..utils.validation import FLOAT_DTYPES\nfrom ..utils.validation import _deprecate_positional_args\nfrom ..utils import check_random_state\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale\nfrom ..utils.fixes import sparse_lsqr\nfrom ..utils._seq_dataset import ArrayDataset32, CSRDataset32\nfrom ..utils._seq_dataset import ArrayDataset64, CSRDataset64\nfrom ..utils.validation import check_is_fitted, _check_sample_weight\nfrom ..utils.fixes import delayed\nfrom ..preprocessing import normalize as f_normalize\n\n# TODO: bayesian_ridge_regression and bayesian_regression_ard\n# should be squashed into its respective objects.\n\nSPARSE_INTERCEPT_DECAY = 0.01\n# For sparse data intercept updates are scaled by this decay factor to avoid\n# intercept oscillation.\n\n\ndef make_dataset(X, y, sample_weight, random_state=None):\n \"\"\"Create ``Dataset`` abstraction for sparse and dense inputs.\n\n This also returns the ``intercept_decay`` which is different\n for sparse datasets.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data\n\n y : array-like, shape (n_samples, )\n Target values.\n\n sample_weight : numpy array of shape (n_samples,)\n The weight of each sample\n\n random_state : int, RandomState instance or None (default)\n Determines random number generation for dataset shuffling and noise.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n dataset\n The ``Dataset`` abstraction\n intercept_decay\n The intercept decay\n \"\"\"\n\n rng = check_random_state(random_state)\n # seed should never be 0 in SequentialDataset64\n seed = rng.randint(1, np.iinfo(np.int32).max)\n\n if X.dtype == np.float32:\n CSRData = CSRDataset32\n ArrayData = ArrayDataset32\n else:\n CSRData = CSRDataset64\n ArrayData = ArrayDataset64\n\n if sp.issparse(X):\n dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight,\n seed=seed)\n intercept_decay = SPARSE_INTERCEPT_DECAY\n else:\n X = np.ascontiguousarray(X)\n dataset = ArrayData(X, y, sample_weight, seed=seed)\n intercept_decay = 1.0\n\n return dataset, intercept_decay\n\n\ndef _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,\n sample_weight=None, return_mean=False, check_input=True):\n \"\"\"Center and scale data.\n\n Centers data to have mean zero along axis 0. If fit_intercept=False or if\n the X is a sparse matrix, no centering is done, but normalization can still\n be applied. 
The function returns the statistics necessary to reconstruct\n the input data, which are X_offset, y_offset, X_scale, such that the output\n\n X = (X - X_offset) / X_scale\n\n X_scale is the L2 norm of X - X_offset. If sample_weight is not None,\n then the weighted mean of X and y is zero, and not the mean itself. If\n return_mean=True, the mean, eventually weighted, is returned, independently\n of whether X was centered (option used for optimization with sparse data in\n coordinate_descend).\n\n This is here because nearly all linear models will want their data to be\n centered. This function also systematically makes y consistent with X.dtype\n \"\"\"\n if isinstance(sample_weight, numbers.Number):\n sample_weight = None\n if sample_weight is not None:\n sample_weight = np.asarray(sample_weight)\n\n if check_input:\n X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],\n dtype=FLOAT_DTYPES)\n elif copy:\n if sp.issparse(X):\n X = X.copy()\n else:\n X = X.copy(order='K')\n\n y = np.asarray(y, dtype=X.dtype)\n\n if fit_intercept:\n if sp.issparse(X):\n X_offset, X_var = mean_variance_axis(X, axis=0)\n if not return_mean:\n X_offset[:] = X.dtype.type(0)\n\n if normalize:\n\n # TODO: f_normalize could be used here as well but the function\n # inplace_csr_row_normalize_l2 must be changed such that it\n # can return also the norms computed internally\n\n # transform variance to norm in-place\n X_var *= X.shape[0]\n X_scale = np.sqrt(X_var, X_var)\n del X_var\n X_scale[X_scale == 0] = 1\n inplace_column_scale(X, 1. / X_scale)\n else:\n X_scale = np.ones(X.shape[1], dtype=X.dtype)\n\n else:\n X_offset = np.average(X, axis=0, weights=sample_weight)\n X -= X_offset\n if normalize:\n X, X_scale = f_normalize(X, axis=0, copy=False,\n return_norm=True)\n else:\n X_scale = np.ones(X.shape[1], dtype=X.dtype)\n y_offset = np.average(y, axis=0, weights=sample_weight)\n y = y - y_offset\n else:\n X_offset = np.zeros(X.shape[1], dtype=X.dtype)\n X_scale = np.ones(X.shape[1], dtype=X.dtype)\n if y.ndim == 1:\n y_offset = X.dtype.type(0)\n else:\n y_offset = np.zeros(y.shape[1], dtype=X.dtype)\n\n return X, y, X_offset, y_offset, X_scale\n\n\n# TODO: _rescale_data should be factored into _preprocess_data.\n# Currently, the fact that sag implements its own way to deal with\n# sample_weight makes the refactoring tricky.\n\ndef _rescale_data(X, y, sample_weight):\n \"\"\"Rescale data sample-wise by square root of sample_weight.\n\n For many linear models, this enables easy support for sample_weight.\n\n Returns\n -------\n X_rescaled : {array-like, sparse matrix}\n\n y_rescaled : {array-like, sparse matrix}\n \"\"\"\n n_samples = X.shape[0]\n sample_weight = np.asarray(sample_weight)\n if sample_weight.ndim == 0:\n sample_weight = np.full(n_samples, sample_weight,\n dtype=sample_weight.dtype)\n sample_weight = np.sqrt(sample_weight)\n sw_matrix = sparse.dia_matrix((sample_weight, 0),\n shape=(n_samples, n_samples))\n X = safe_sparse_dot(sw_matrix, X)\n y = safe_sparse_dot(sw_matrix, y)\n return X, y\n\n\nclass LinearModel(BaseEstimator, metaclass=ABCMeta):\n \"\"\"Base class for Linear Models\"\"\"\n\n @abstractmethod\n def fit(self, X, y):\n \"\"\"Fit model.\"\"\"\n\n def _decision_function(self, X):\n check_is_fitted(self)\n\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])\n return safe_sparse_dot(X, self.coef_.T,\n dense_output=True) + self.intercept_\n\n def predict(self, X):\n \"\"\"\n Predict using the linear model.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape 
(n_samples, n_features)\n Samples.\n\n Returns\n -------\n C : array, shape (n_samples,)\n Returns predicted values.\n \"\"\"\n return self._decision_function(X)\n\n _preprocess_data = staticmethod(_preprocess_data)\n\n def _set_intercept(self, X_offset, y_offset, X_scale):\n \"\"\"Set the intercept_\n \"\"\"\n if self.fit_intercept:\n self.coef_ = self.coef_ / X_scale\n self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)\n else:\n self.intercept_ = 0.\n\n def _more_tags(self):\n return {'requires_y': True}\n\n\n# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.\n# Maybe the n_features checking can be moved to LinearModel.\nclass LinearClassifierMixin(ClassifierMixin):\n \"\"\"Mixin for linear classifiers.\n\n Handles prediction for sparse and dense X.\n \"\"\"\n\n def decision_function(self, X):\n \"\"\"\n Predict confidence scores for samples.\n\n The confidence score for a sample is proportional to the signed\n distance of that sample to the hyperplane.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Samples.\n\n Returns\n -------\n array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)\n Confidence scores per (sample, class) combination. In the binary\n case, confidence score for self.classes_[1] where >0 means this\n class would be predicted.\n \"\"\"\n check_is_fitted(self)\n\n X = check_array(X, accept_sparse='csr')\n\n n_features = self.coef_.shape[1]\n if X.shape[1] != n_features:\n raise ValueError(\"X has %d features per sample; expecting %d\"\n % (X.shape[1], n_features))\n\n scores = safe_sparse_dot(X, self.coef_.T,\n dense_output=True) + self.intercept_\n return scores.ravel() if scores.shape[1] == 1 else scores\n\n def predict(self, X):\n \"\"\"\n Predict class labels for samples in X.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Samples.\n\n Returns\n -------\n C : array, shape [n_samples]\n Predicted class label per sample.\n \"\"\"\n scores = self.decision_function(X)\n if len(scores.shape) == 1:\n indices = (scores > 0).astype(int)\n else:\n indices = scores.argmax(axis=1)\n return self.classes_[indices]\n\n def _predict_proba_lr(self, X):\n \"\"\"Probability estimation for OvR logistic regression.\n\n Positive class probabilities are computed as\n 1. / (1. + np.exp(-self.decision_function(X)));\n multiclass is handled by normalizing that over all classes.\n \"\"\"\n prob = self.decision_function(X)\n expit(prob, out=prob)\n if prob.ndim == 1:\n return np.vstack([1 - prob, prob]).T\n else:\n # OvR normalization, like LibLinear's predict_probability\n prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))\n return prob\n\n\nclass SparseCoefMixin:\n \"\"\"Mixin for converting coef_ to and from CSR format.\n\n L1-regularizing estimators should inherit this.\n \"\"\"\n\n def densify(self):\n \"\"\"\n Convert coefficient matrix to dense array format.\n\n Converts the ``coef_`` member (back) to a numpy.ndarray. 
This is the\n default format of ``coef_`` and is required for fitting, so calling\n this method is only required on models that have previously been\n sparsified; otherwise, it is a no-op.\n\n Returns\n -------\n self\n Fitted estimator.\n \"\"\"\n msg = \"Estimator, %(name)s, must be fitted before densifying.\"\n check_is_fitted(self, msg=msg)\n if sp.issparse(self.coef_):\n self.coef_ = self.coef_.toarray()\n return self\n\n def sparsify(self):\n \"\"\"\n Convert coefficient matrix to sparse format.\n\n Converts the ``coef_`` member to a scipy.sparse matrix, which for\n L1-regularized models can be much more memory- and storage-efficient\n than the usual numpy.ndarray representation.\n\n The ``intercept_`` member is not converted.\n\n Returns\n -------\n self\n Fitted estimator.\n\n Notes\n -----\n For non-sparse models, i.e. when there are not many zeros in ``coef_``,\n this may actually *increase* memory usage, so use this method with\n care. A rule of thumb is that the number of zero elements, which can\n be computed with ``(coef_ == 0).sum()``, must be more than 50% for this\n to provide significant benefits.\n\n After calling this method, further fitting with the partial_fit\n method (if any) will not work until you call densify.\n \"\"\"\n msg = \"Estimator, %(name)s, must be fitted before sparsifying.\"\n check_is_fitted(self, msg=msg)\n self.coef_ = sp.csr_matrix(self.coef_)\n return self\n\n\nclass LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):\n \"\"\"\n Ordinary least squares Linear Regression.\n\n LinearRegression fits a linear model with coefficients w = (w1, ..., wp)\n to minimize the residual sum of squares between the observed targets in\n the dataset, and the targets predicted by the linear approximation.\n\n Parameters\n ----------\n fit_intercept : bool, default=True\n Whether to calculate the intercept for this model. If set\n to False, no intercept will be used in calculations\n (i.e. data is expected to be centered).\n\n normalize : bool, default=False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : bool, default=True\n If True, X will be copied; else, it may be overwritten.\n\n n_jobs : int, default=None\n The number of jobs to use for the computation. This will only provide\n speedup for n_targets > 1 and sufficient large problems.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n positive : bool, default=False\n When set to ``True``, forces the coefficients to be positive. This\n option is only supported for dense arrays.\n\n .. versionadded:: 0.24\n\n Attributes\n ----------\n coef_ : array of shape (n_features, ) or (n_targets, n_features)\n Estimated coefficients for the linear regression problem.\n If multiple targets are passed during the fit (y 2D), this\n is a 2D array of shape (n_targets, n_features), while if only\n one target is passed, this is a 1D array of length n_features.\n\n rank_ : int\n Rank of matrix `X`. Only available when `X` is dense.\n\n singular_ : array of shape (min(X, y),)\n Singular values of `X`. 
Only available when `X` is dense.\n\n intercept_ : float or array of shape (n_targets,)\n Independent term in the linear model. Set to 0.0 if\n `fit_intercept = False`.\n\n See Also\n --------\n Ridge : Ridge regression addresses some of the\n problems of Ordinary Least Squares by imposing a penalty on the\n size of the coefficients with l2 regularization.\n Lasso : The Lasso is a linear model that estimates\n sparse coefficients with l1 regularization.\n ElasticNet : Elastic-Net is a linear regression\n model trained with both l1 and l2 -norm regularization of the\n coefficients.\n\n Notes\n -----\n From the implementation point of view, this is just plain Ordinary\n Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares\n (scipy.optimize.nnls) wrapped as a predictor object.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.linear_model import LinearRegression\n >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n >>> # y = 1 * x_0 + 2 * x_1 + 3\n >>> y = np.dot(X, np.array([1, 2])) + 3\n >>> reg = LinearRegression().fit(X, y)\n >>> reg.score(X, y)\n 1.0\n >>> reg.coef_\n array([1., 2.])\n >>> reg.intercept_\n 3.0...\n >>> reg.predict(np.array([[3, 5]]))\n array([16.])\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, *, fit_intercept=True, normalize=False, copy_X=True,\n n_jobs=None, positive=False):\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.n_jobs = n_jobs\n self.positive = positive\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"\n Fit linear model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values. Will be cast to X's dtype if necessary\n\n sample_weight : array-like of shape (n_samples,), default=None\n Individual weights for each sample\n\n .. 
versionadded:: 0.17\n parameter *sample_weight* support to LinearRegression.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n\n n_jobs_ = self.n_jobs\n\n accept_sparse = False if self.positive else ['csr', 'csc', 'coo']\n\n X, y = self._validate_data(X, y, accept_sparse=accept_sparse,\n y_numeric=True, multi_output=True)\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X,\n dtype=X.dtype)\n\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,\n copy=self.copy_X, sample_weight=sample_weight,\n return_mean=True)\n\n if sample_weight is not None:\n # Sample weight can be implemented via a simple rescaling.\n X, y = _rescale_data(X, y, sample_weight)\n\n if self.positive:\n if y.ndim < 2:\n self.coef_, self._residues = optimize.nnls(X, y)\n else:\n # scipy.optimize.nnls cannot handle y with shape (M, K)\n outs = Parallel(n_jobs=n_jobs_)(\n delayed(optimize.nnls)(X, y[:, j])\n for j in range(y.shape[1]))\n self.coef_, self._residues = map(np.vstack, zip(*outs))\n elif sp.issparse(X):\n X_offset_scale = X_offset / X_scale\n\n def matvec(b):\n return X.dot(b) - b.dot(X_offset_scale)\n\n def rmatvec(b):\n return X.T.dot(b) - X_offset_scale * np.sum(b)\n\n X_centered = sparse.linalg.LinearOperator(shape=X.shape,\n matvec=matvec,\n rmatvec=rmatvec)\n\n if y.ndim < 2:\n out = sparse_lsqr(X_centered, y)\n self.coef_ = out[0]\n self._residues = out[3]\n else:\n # sparse_lstsq cannot handle y with shape (M, K)\n outs = Parallel(n_jobs=n_jobs_)(\n delayed(sparse_lsqr)(X_centered, y[:, j].ravel())\n for j in range(y.shape[1]))\n self.coef_ = np.vstack([out[0] for out in outs])\n self._residues = np.vstack([out[3] for out in outs])\n else:\n self.coef_, self._residues, self.rank_, self.singular_ = \\\n linalg.lstsq(X, y)\n self.coef_ = self.coef_.T\n\n if y.ndim == 1:\n self.coef_ = np.ravel(self.coef_)\n self._set_intercept(X_offset, y_offset, X_scale)\n return self\n\n\ndef _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy,\n check_input=True, sample_weight=None):\n \"\"\"Aux function used at beginning of fit in linear models\n\n Parameters\n ----------\n order : 'F', 'C' or None, default=None\n Whether X and y will be forced to be fortran or c-style. 
Only relevant\n if sample_weight is not None.\n \"\"\"\n n_samples, n_features = X.shape\n\n if sparse.isspmatrix(X):\n # copy is not needed here as X is not modified inplace when X is sparse\n precompute = False\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X, y, fit_intercept=fit_intercept, normalize=normalize,\n copy=False, return_mean=True, check_input=check_input)\n else:\n # copy was done in fit if necessary\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy,\n check_input=check_input, sample_weight=sample_weight)\n if sample_weight is not None:\n X, y = _rescale_data(X, y, sample_weight=sample_weight)\n if hasattr(precompute, '__array__') and (\n fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or\n normalize and not np.allclose(X_scale, np.ones(n_features))):\n warnings.warn(\"Gram matrix was provided but X was centered\"\n \" to fit intercept, \"\n \"or X was normalized : recomputing Gram matrix.\",\n UserWarning)\n # recompute Gram\n precompute = 'auto'\n Xy = None\n\n # precompute if n_samples > n_features\n if isinstance(precompute, str) and precompute == 'auto':\n precompute = (n_samples > n_features)\n\n if precompute is True:\n # make sure that the 'precompute' array is contiguous.\n precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,\n order='C')\n np.dot(X.T, X, out=precompute)\n\n if not hasattr(precompute, '__array__'):\n Xy = None # cannot use Xy if precompute is not Gram\n\n if hasattr(precompute, '__array__') and Xy is None:\n common_dtype = np.find_common_type([X.dtype, y.dtype], [])\n if y.ndim == 1:\n # Xy is 1d, make sure it is contiguous.\n Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')\n np.dot(X.T, y, out=Xy)\n else:\n # Make sure that Xy is always F contiguous even if X or y are not\n # contiguous: the goal is to make it fast to extract the data for a\n # specific target.\n n_targets = y.shape[1]\n Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,\n order='F')\n np.dot(y.T, X, out=Xy.T)\n\n return X, y, X_offset, y_offset, X_scale, precompute, Xy\n", "\"\"\"\nTesting for the partial dependence module.\n\"\"\"\n\nimport numpy as np\nimport pytest\n\nimport sklearn\nfrom sklearn.inspection import partial_dependence\nfrom sklearn.inspection._partial_dependence import (\n _grid_from_X,\n _partial_dependence_brute,\n _partial_dependence_recursion\n)\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.experimental import enable_hist_gradient_boosting # noqa\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.ensemble import HistGradientBoostingRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import MultiTaskLasso\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.datasets import load_iris\nfrom sklearn.datasets import make_classification, make_regression\nfrom sklearn.cluster import KMeans\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.metrics import r2_score\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import scale\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.dummy import DummyClassifier\nfrom 
sklearn.base import BaseEstimator, ClassifierMixin, clone\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils import _IS_32BIT\nfrom sklearn.utils.validation import check_random_state\nfrom sklearn.tree.tests.test_tree import assert_is_subtree\n\n\n# toy sample\nX = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]\ny = [-1, -1, -1, 1, 1, 1]\n\n\n# (X, y), n_targets <-- as expected in the output of partial_dep()\nbinary_classification_data = (make_classification(n_samples=50,\n random_state=0), 1)\nmulticlass_classification_data = (make_classification(n_samples=50,\n n_classes=3,\n n_clusters_per_class=1,\n random_state=0), 3)\nregression_data = (make_regression(n_samples=50, random_state=0), 1)\nmultioutput_regression_data = (make_regression(n_samples=50, n_targets=2,\n random_state=0), 2)\n\n# iris\niris = load_iris()\n\n\[email protected](\"ignore:A Bunch will be returned\")\[email protected]('Estimator, method, data', [\n (GradientBoostingClassifier, 'auto', binary_classification_data),\n (GradientBoostingClassifier, 'auto', multiclass_classification_data),\n (GradientBoostingClassifier, 'brute', binary_classification_data),\n (GradientBoostingClassifier, 'brute', multiclass_classification_data),\n (GradientBoostingRegressor, 'auto', regression_data),\n (GradientBoostingRegressor, 'brute', regression_data),\n (DecisionTreeRegressor, 'brute', regression_data),\n (LinearRegression, 'brute', regression_data),\n (LinearRegression, 'brute', multioutput_regression_data),\n (LogisticRegression, 'brute', binary_classification_data),\n (LogisticRegression, 'brute', multiclass_classification_data),\n (MultiTaskLasso, 'brute', multioutput_regression_data),\n ])\[email protected]('grid_resolution', (5, 10))\[email protected]('features', ([1], [1, 2]))\[email protected]('kind', ('legacy', 'average', 'individual', 'both'))\ndef test_output_shape(Estimator, method, data, grid_resolution,\n features, kind):\n # Check that partial_dependence has consistent output shape for different\n # kinds of estimators:\n # - classifiers with binary and multiclass settings\n # - regressors\n # - multi-task regressors\n\n est = Estimator()\n\n # n_target corresponds to the number of classes (1 for binary classif) or\n # the number of tasks / outputs in multi task settings. 
It's equal to 1 for\n # classical regression_data.\n (X, y), n_targets = data\n n_instances = X.shape[0]\n\n est.fit(X, y)\n result = partial_dependence(\n est, X=X, features=features, method=method, kind=kind,\n grid_resolution=grid_resolution\n )\n # FIXME: Remove 'legacy' support in 1.1\n pdp, axes = result if kind == 'legacy' else (result, result[\"values\"])\n\n expected_pdp_shape = (n_targets,\n *[grid_resolution for _ in range(len(features))])\n expected_ice_shape = (n_targets, n_instances,\n *[grid_resolution for _ in range(len(features))])\n if kind == 'legacy':\n assert pdp.shape == expected_pdp_shape\n elif kind == 'average':\n assert pdp.average.shape == expected_pdp_shape\n elif kind == 'individual':\n assert pdp.individual.shape == expected_ice_shape\n else: # 'both'\n assert pdp.average.shape == expected_pdp_shape\n assert pdp.individual.shape == expected_ice_shape\n\n expected_axes_shape = (len(features), grid_resolution)\n assert axes is not None\n assert np.asarray(axes).shape == expected_axes_shape\n\n\ndef test_grid_from_X():\n # tests for _grid_from_X: sanity check for output, and for shapes.\n\n # Make sure that the grid is a cartesian product of the input (it will use\n # the unique values instead of the percentiles)\n percentiles = (.05, .95)\n grid_resolution = 100\n X = np.asarray([[1, 2],\n [3, 4]])\n grid, axes = _grid_from_X(X, percentiles, grid_resolution)\n assert_array_equal(grid, [[1, 2],\n [1, 4],\n [3, 2],\n [3, 4]])\n assert_array_equal(axes, X.T)\n\n # test shapes of returned objects depending on the number of unique values\n # for a feature.\n rng = np.random.RandomState(0)\n grid_resolution = 15\n\n # n_unique_values > grid_resolution\n X = rng.normal(size=(20, 2))\n grid, axes = _grid_from_X(X, percentiles, grid_resolution=grid_resolution)\n assert grid.shape == (grid_resolution * grid_resolution, X.shape[1])\n assert np.asarray(axes).shape == (2, grid_resolution)\n\n # n_unique_values < grid_resolution, will use actual values\n n_unique_values = 12\n X[n_unique_values - 1:, 0] = 12345\n rng.shuffle(X) # just to make sure the order is irrelevant\n grid, axes = _grid_from_X(X, percentiles, grid_resolution=grid_resolution)\n assert grid.shape == (n_unique_values * grid_resolution, X.shape[1])\n # axes is a list of arrays of different shapes\n assert axes[0].shape == (n_unique_values,)\n assert axes[1].shape == (grid_resolution,)\n\n\[email protected](\n \"grid_resolution, percentiles, err_msg\",\n [(2, (0, 0.0001), \"percentiles are too close\"),\n (100, (1, 2, 3, 4), \"'percentiles' must be a sequence of 2 elements\"),\n (100, 12345, \"'percentiles' must be a sequence of 2 elements\"),\n (100, (-1, .95), r\"'percentiles' values must be in \\[0, 1\\]\"),\n (100, (.05, 2), r\"'percentiles' values must be in \\[0, 1\\]\"),\n (100, (.9, .1), r\"percentiles\\[0\\] must be strictly less than\"),\n (1, (0.05, 0.95), \"'grid_resolution' must be strictly greater than 1\")]\n)\ndef test_grid_from_X_error(grid_resolution, percentiles, err_msg):\n X = np.asarray([[1, 2], [3, 4]])\n with pytest.raises(ValueError, match=err_msg):\n _grid_from_X(\n X, grid_resolution=grid_resolution, percentiles=percentiles\n )\n\n\[email protected]('target_feature', range(5))\[email protected]('est, method', [\n (LinearRegression(), 'brute'),\n (GradientBoostingRegressor(random_state=0), 'brute'),\n (GradientBoostingRegressor(random_state=0), 'recursion'),\n (HistGradientBoostingRegressor(random_state=0), 'brute'),\n (HistGradientBoostingRegressor(random_state=0), 
'recursion')]\n)\ndef test_partial_dependence_helpers(est, method, target_feature):\n # Check that what is returned by _partial_dependence_brute or\n # _partial_dependence_recursion is equivalent to manually setting a target\n # feature to a given value, and computing the average prediction over all\n # samples.\n # This also checks that the brute and recursion methods give the same\n # output.\n # Note that even on the trainset, the brute and the recursion methods\n # aren't always strictly equivalent, in particular when the slow method\n # generates unrealistic samples that have low mass in the joint\n # distribution of the input features, and when some of the features are\n # dependent. Hence the high tolerance on the checks.\n\n X, y = make_regression(random_state=0, n_features=5, n_informative=5)\n # The 'init' estimator for GBDT (here the average prediction) isn't taken\n # into account with the recursion method, for technical reasons. We set\n # the mean to 0 to that this 'bug' doesn't have any effect.\n y = y - y.mean()\n est.fit(X, y)\n\n # target feature will be set to .5 and then to 123\n features = np.array([target_feature], dtype=np.int32)\n grid = np.array([[.5],\n [123]])\n\n if method == 'brute':\n pdp, predictions = _partial_dependence_brute(est, grid, features, X,\n response_method='auto')\n else:\n pdp = _partial_dependence_recursion(est, grid, features)\n\n mean_predictions = []\n for val in (.5, 123):\n X_ = X.copy()\n X_[:, target_feature] = val\n mean_predictions.append(est.predict(X_).mean())\n\n pdp = pdp[0] # (shape is (1, 2) so make it (2,))\n\n # allow for greater margin for error with recursion method\n rtol = 1e-1 if method == 'recursion' else 1e-3\n assert np.allclose(pdp, mean_predictions, rtol=rtol)\n\n\[email protected]('seed', range(1))\ndef test_recursion_decision_tree_vs_forest_and_gbdt(seed):\n # Make sure that the recursion method gives the same results on a\n # DecisionTreeRegressor and a GradientBoostingRegressor or a\n # RandomForestRegressor with 1 tree and equivalent parameters.\n\n rng = np.random.RandomState(seed)\n\n # Purely random dataset to avoid correlated features\n n_samples = 1000\n n_features = 5\n X = rng.randn(n_samples, n_features)\n y = rng.randn(n_samples) * 10\n\n # The 'init' estimator for GBDT (here the average prediction) isn't taken\n # into account with the recursion method, for technical reasons. We set\n # the mean to 0 to that this 'bug' doesn't have any effect.\n y = y - y.mean()\n\n # set max_depth not too high to avoid splits with same gain but different\n # features\n max_depth = 5\n\n tree_seed = 0\n forest = RandomForestRegressor(n_estimators=1, max_features=None,\n bootstrap=False, max_depth=max_depth,\n random_state=tree_seed)\n # The forest will use ensemble.base._set_random_states to set the\n # random_state of the tree sub-estimator. 
We simulate this here to have\n # equivalent estimators.\n equiv_random_state = check_random_state(tree_seed).randint(\n np.iinfo(np.int32).max)\n gbdt = GradientBoostingRegressor(n_estimators=1, learning_rate=1,\n criterion='mse', max_depth=max_depth,\n random_state=equiv_random_state)\n tree = DecisionTreeRegressor(max_depth=max_depth,\n random_state=equiv_random_state)\n\n forest.fit(X, y)\n gbdt.fit(X, y)\n tree.fit(X, y)\n\n # sanity check: if the trees aren't the same, the PD values won't be equal\n try:\n assert_is_subtree(tree.tree_, gbdt[0, 0].tree_)\n assert_is_subtree(tree.tree_, forest[0].tree_)\n except AssertionError:\n # For some reason the trees aren't exactly equal on 32bits, so the PDs\n # cannot be equal either. See\n # https://github.com/scikit-learn/scikit-learn/issues/8853\n assert _IS_32BIT, \"this should only fail on 32 bit platforms\"\n return\n\n grid = rng.randn(50).reshape(-1, 1)\n for f in range(n_features):\n features = np.array([f], dtype=np.int32)\n\n pdp_forest = _partial_dependence_recursion(forest, grid, features)\n pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features)\n pdp_tree = _partial_dependence_recursion(tree, grid, features)\n\n np.testing.assert_allclose(pdp_gbdt, pdp_tree)\n np.testing.assert_allclose(pdp_forest, pdp_tree)\n\n\[email protected]('est', (\n GradientBoostingClassifier(random_state=0),\n HistGradientBoostingClassifier(random_state=0),\n))\[email protected]('target_feature', (0, 1, 2, 3, 4, 5))\ndef test_recursion_decision_function(est, target_feature):\n # Make sure the recursion method (implicitly uses decision_function) has\n # the same result as using brute method with\n # response_method=decision_function\n\n X, y = make_classification(n_classes=2, n_clusters_per_class=1,\n random_state=1)\n assert np.mean(y) == .5 # make sure the init estimator predicts 0 anyway\n\n est.fit(X, y)\n\n preds_1 = partial_dependence(\n est, X, [target_feature], response_method='decision_function',\n method='recursion', kind='average'\n )\n preds_2 = partial_dependence(\n est, X, [target_feature], response_method='decision_function',\n method='brute', kind='average'\n )\n\n assert_allclose(preds_1['average'], preds_2['average'], atol=1e-7)\n\n\[email protected]('est', (\n LinearRegression(),\n GradientBoostingRegressor(random_state=0),\n HistGradientBoostingRegressor(random_state=0, min_samples_leaf=1,\n max_leaf_nodes=None, max_iter=1),\n DecisionTreeRegressor(random_state=0),\n))\[email protected]('power', (1, 2))\ndef test_partial_dependence_easy_target(est, power):\n # If the target y only depends on one feature in an obvious way (linear or\n # quadratic) then the partial dependence for that feature should reflect\n # it.\n # We here fit a linear regression_data model (with polynomial features if\n # needed) and compute r_squared to check that the partial dependence\n # correctly reflects the target.\n\n rng = np.random.RandomState(0)\n n_samples = 200\n target_variable = 2\n X = rng.normal(size=(n_samples, 5))\n y = X[:, target_variable]**power\n\n est.fit(X, y)\n\n pdp = partial_dependence(\n est, features=[target_variable], X=X, grid_resolution=1000,\n kind='average'\n )\n\n new_X = pdp[\"values\"][0].reshape(-1, 1)\n new_y = pdp['average'][0]\n # add polynomial features if needed\n new_X = PolynomialFeatures(degree=power).fit_transform(new_X)\n\n lr = LinearRegression().fit(new_X, new_y)\n r2 = r2_score(new_y, lr.predict(new_X))\n\n assert r2 > .99\n\n\[email protected]('Estimator',\n (sklearn.tree.DecisionTreeClassifier,\n 
sklearn.tree.ExtraTreeClassifier,\n sklearn.ensemble.ExtraTreesClassifier,\n sklearn.neighbors.KNeighborsClassifier,\n sklearn.neighbors.RadiusNeighborsClassifier,\n sklearn.ensemble.RandomForestClassifier))\ndef test_multiclass_multioutput(Estimator):\n # Make sure error is raised for multiclass-multioutput classifiers\n\n # make multiclass-multioutput dataset\n X, y = make_classification(n_classes=3, n_clusters_per_class=1,\n random_state=0)\n y = np.array([y, y]).T\n\n est = Estimator()\n est.fit(X, y)\n\n with pytest.raises(\n ValueError,\n match=\"Multiclass-multioutput estimators are not supported\"):\n partial_dependence(est, X, [0])\n\n\nclass NoPredictProbaNoDecisionFunction(ClassifierMixin, BaseEstimator):\n def fit(self, X, y):\n # simulate that we have some classes\n self.classes_ = [0, 1]\n return self\n\n\[email protected](\"ignore:A Bunch will be returned\")\[email protected](\n \"estimator, params, err_msg\",\n [(KMeans(),\n {'features': [0]},\n \"'estimator' must be a fitted regressor or classifier\"),\n (LinearRegression(),\n {'features': [0], 'response_method': 'predict_proba'},\n 'The response_method parameter is ignored for regressors'),\n (GradientBoostingClassifier(random_state=0),\n {'features': [0], 'response_method': 'predict_proba',\n 'method': 'recursion'},\n \"'recursion' method, the response_method must be 'decision_function'\"),\n (GradientBoostingClassifier(random_state=0),\n {'features': [0], 'response_method': 'predict_proba', 'method': 'auto'},\n \"'recursion' method, the response_method must be 'decision_function'\"),\n (GradientBoostingClassifier(random_state=0),\n {'features': [0], 'response_method': 'blahblah'},\n 'response_method blahblah is invalid. Accepted response_method'),\n (NoPredictProbaNoDecisionFunction(),\n {'features': [0], 'response_method': 'auto'},\n 'The estimator has no predict_proba and no decision_function method'),\n (NoPredictProbaNoDecisionFunction(),\n {'features': [0], 'response_method': 'predict_proba'},\n 'The estimator has no predict_proba method.'),\n (NoPredictProbaNoDecisionFunction(),\n {'features': [0], 'response_method': 'decision_function'},\n 'The estimator has no decision_function method.'),\n (LinearRegression(),\n {'features': [0], 'method': 'blahblah'},\n 'blahblah is invalid. 
Accepted method names are brute, recursion, auto'),\n (LinearRegression(),\n {'features': [0], 'method': 'recursion', 'kind': 'individual'},\n \"The 'recursion' method only applies when 'kind' is set to 'average'\"),\n (LinearRegression(),\n {'features': [0], 'method': 'recursion', 'kind': 'both'},\n \"The 'recursion' method only applies when 'kind' is set to 'average'\"),\n (LinearRegression(),\n {'features': [0], 'method': 'recursion'},\n \"Only the following estimators support the 'recursion' method:\")]\n)\ndef test_partial_dependence_error(estimator, params, err_msg):\n X, y = make_classification(random_state=0)\n estimator.fit(X, y)\n\n with pytest.raises(ValueError, match=err_msg):\n partial_dependence(estimator, X, **params)\n\n\[email protected](\n \"with_dataframe, err_msg\",\n [(True, \"Only array-like or scalar are supported\"),\n (False, \"Only array-like or scalar are supported\")]\n)\ndef test_partial_dependence_slice_error(with_dataframe, err_msg):\n X, y = make_classification(random_state=0)\n if with_dataframe:\n pd = pytest.importorskip('pandas')\n X = pd.DataFrame(X)\n estimator = LogisticRegression().fit(X, y)\n\n with pytest.raises(TypeError, match=err_msg):\n partial_dependence(estimator, X, features=slice(0, 2, 1))\n\n\[email protected](\n 'estimator',\n [LinearRegression(), GradientBoostingClassifier(random_state=0)]\n)\[email protected]('features', [-1, 10000])\ndef test_partial_dependence_unknown_feature_indices(estimator, features):\n X, y = make_classification(random_state=0)\n estimator.fit(X, y)\n\n err_msg = 'all features must be in'\n with pytest.raises(ValueError, match=err_msg):\n partial_dependence(estimator, X, [features])\n\n\[email protected](\n 'estimator',\n [LinearRegression(), GradientBoostingClassifier(random_state=0)]\n)\ndef test_partial_dependence_unknown_feature_string(estimator):\n pd = pytest.importorskip(\"pandas\")\n X, y = make_classification(random_state=0)\n df = pd.DataFrame(X)\n estimator.fit(df, y)\n\n features = ['random']\n err_msg = 'A given column is not a column of the dataframe'\n with pytest.raises(ValueError, match=err_msg):\n partial_dependence(estimator, df, features)\n\n\[email protected](\n 'estimator',\n [LinearRegression(), GradientBoostingClassifier(random_state=0)]\n)\ndef test_partial_dependence_X_list(estimator):\n # check that array-like objects are accepted\n X, y = make_classification(random_state=0)\n estimator.fit(X, y)\n partial_dependence(estimator, list(X), [0], kind='average')\n\n\ndef test_warning_recursion_non_constant_init():\n # make sure that passing a non-constant init parameter to a GBDT and using\n # recursion method yields a warning.\n\n gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0)\n gbc.fit(X, y)\n\n with pytest.warns(\n UserWarning,\n match='Using recursion method with a non-constant init predictor'):\n partial_dependence(gbc, X, [0], method='recursion', kind='average')\n\n with pytest.warns(\n UserWarning,\n match='Using recursion method with a non-constant init predictor'):\n partial_dependence(gbc, X, [0], method='recursion', kind='average')\n\n\ndef test_partial_dependence_sample_weight():\n # Test near perfect correlation between partial dependence and diagonal\n # when sample weights emphasize y = x predictions\n # non-regression test for #13193\n # TODO: extend to HistGradientBoosting once sample_weight is supported\n N = 1000\n rng = np.random.RandomState(123456)\n mask = rng.randint(2, size=N, dtype=bool)\n\n x = rng.rand(N)\n # set y = x on mask and y = -x 
outside\n y = x.copy()\n y[~mask] = -y[~mask]\n X = np.c_[mask, x]\n # sample weights to emphasize data points where y = x\n sample_weight = np.ones(N)\n sample_weight[mask] = 1000.\n\n clf = GradientBoostingRegressor(n_estimators=10, random_state=1)\n clf.fit(X, y, sample_weight=sample_weight)\n\n pdp = partial_dependence(clf, X, features=[1], kind='average')\n\n assert np.corrcoef(pdp['average'], pdp[\"values\"])[0, 1] > 0.99\n\n\ndef test_hist_gbdt_sw_not_supported():\n # TODO: remove/fix when PDP supports HGBT with sample weights\n clf = HistGradientBoostingRegressor(random_state=1)\n clf.fit(X, y, sample_weight=np.ones(len(X)))\n\n with pytest.raises(NotImplementedError,\n match=\"does not support partial dependence\"):\n partial_dependence(clf, X, features=[1])\n\n\ndef test_partial_dependence_pipeline():\n # check that the partial dependence support pipeline\n iris = load_iris()\n\n scaler = StandardScaler()\n clf = DummyClassifier(random_state=42)\n pipe = make_pipeline(scaler, clf)\n\n clf.fit(scaler.fit_transform(iris.data), iris.target)\n pipe.fit(iris.data, iris.target)\n\n features = 0\n pdp_pipe = partial_dependence(\n pipe, iris.data, features=[features], grid_resolution=10,\n kind='average'\n )\n pdp_clf = partial_dependence(\n clf, scaler.transform(iris.data), features=[features],\n grid_resolution=10, kind='average'\n )\n assert_allclose(pdp_pipe['average'], pdp_clf['average'])\n assert_allclose(\n pdp_pipe[\"values\"][0],\n pdp_clf[\"values\"][0] * scaler.scale_[features] + scaler.mean_[features]\n )\n\n\[email protected](\n \"estimator\",\n [LogisticRegression(max_iter=1000, random_state=0),\n GradientBoostingClassifier(random_state=0, n_estimators=5)],\n ids=['estimator-brute', 'estimator-recursion']\n)\[email protected](\n \"preprocessor\",\n [None,\n make_column_transformer(\n (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),\n (RobustScaler(), [iris.feature_names[i] for i in (1, 3)])),\n make_column_transformer(\n (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),\n remainder='passthrough')],\n ids=['None', 'column-transformer', 'column-transformer-passthrough']\n)\[email protected](\n \"features\",\n [[0, 2], [iris.feature_names[i] for i in (0, 2)]],\n ids=['features-integer', 'features-string']\n)\ndef test_partial_dependence_dataframe(estimator, preprocessor, features):\n # check that the partial dependence support dataframe and pipeline\n # including a column transformer\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(scale(iris.data), columns=iris.feature_names)\n\n pipe = make_pipeline(preprocessor, estimator)\n pipe.fit(df, iris.target)\n pdp_pipe = partial_dependence(\n pipe, df, features=features, grid_resolution=10, kind='average'\n )\n\n # the column transformer will reorder the column when transforming\n # we mixed the index to be sure that we are computing the partial\n # dependence of the right columns\n if preprocessor is not None:\n X_proc = clone(preprocessor).fit_transform(df)\n features_clf = [0, 1]\n else:\n X_proc = df\n features_clf = [0, 2]\n\n clf = clone(estimator).fit(X_proc, iris.target)\n pdp_clf = partial_dependence(\n clf, X_proc, features=features_clf, method='brute', grid_resolution=10,\n kind='average'\n )\n\n assert_allclose(pdp_pipe['average'], pdp_clf['average'])\n if preprocessor is not None:\n scaler = preprocessor.named_transformers_['standardscaler']\n assert_allclose(\n pdp_pipe[\"values\"][1],\n pdp_clf[\"values\"][1] * scaler.scale_[1] + scaler.mean_[1]\n )\n else:\n 
assert_allclose(pdp_pipe[\"values\"][1], pdp_clf[\"values\"][1])\n\n\[email protected](\n \"features, expected_pd_shape\",\n [(0, (3, 10)),\n (iris.feature_names[0], (3, 10)),\n ([0, 2], (3, 10, 10)),\n ([iris.feature_names[i] for i in (0, 2)], (3, 10, 10)),\n ([True, False, True, False], (3, 10, 10))],\n ids=['scalar-int', 'scalar-str', 'list-int', 'list-str', 'mask']\n)\ndef test_partial_dependence_feature_type(features, expected_pd_shape):\n # check all possible features type supported in PDP\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(iris.data, columns=iris.feature_names)\n\n preprocessor = make_column_transformer(\n (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),\n (RobustScaler(), [iris.feature_names[i] for i in (1, 3)])\n )\n pipe = make_pipeline(\n preprocessor, LogisticRegression(max_iter=1000, random_state=0)\n )\n pipe.fit(df, iris.target)\n pdp_pipe = partial_dependence(\n pipe, df, features=features, grid_resolution=10, kind='average'\n )\n assert pdp_pipe['average'].shape == expected_pd_shape\n assert len(pdp_pipe[\"values\"]) == len(pdp_pipe['average'].shape) - 1\n\n\[email protected](\n \"estimator\", [LinearRegression(), LogisticRegression(),\n GradientBoostingRegressor(), GradientBoostingClassifier()]\n)\ndef test_partial_dependence_unfitted(estimator):\n X = iris.data\n preprocessor = make_column_transformer(\n (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3])\n )\n pipe = make_pipeline(preprocessor, estimator)\n with pytest.raises(NotFittedError, match=\"is not fitted yet\"):\n partial_dependence(pipe, X, features=[0, 2], grid_resolution=10)\n with pytest.raises(NotFittedError, match=\"is not fitted yet\"):\n partial_dependence(estimator, X, features=[0, 2], grid_resolution=10)\n\n\[email protected]('Estimator, data', [\n (LinearRegression, multioutput_regression_data),\n (LogisticRegression, binary_classification_data)])\ndef test_kind_average_and_average_of_individual(Estimator, data):\n est = Estimator()\n (X, y), n_targets = data\n est.fit(X, y)\n\n pdp_avg = partial_dependence(\n est, X=X, features=[1, 2], kind='average'\n )\n pdp_ind = partial_dependence(\n est, X=X, features=[1, 2], kind='individual'\n )\n avg_ind = np.mean(pdp_ind['individual'], axis=1)\n assert_allclose(avg_ind, pdp_avg['average'])\n\n\ndef test_warning_for_kind_legacy():\n est = LogisticRegression()\n (X, y), n_targets = binary_classification_data\n est.fit(X, y)\n\n err_msg = (\"A Bunch will be returned in place of 'predictions' from \"\n \"version 1.1\")\n with pytest.warns(FutureWarning, match=err_msg):\n partial_dependence(est, X=X, features=[1, 2])\n\n with pytest.warns(FutureWarning, match=err_msg):\n partial_dependence(est, X=X, features=[1, 2], kind='legacy')\n", "\"\"\"Weight Boosting.\n\nThis module contains weight boosting estimators for both classification and\nregression.\n\nThe module structure is the following:\n\n- The `BaseWeightBoosting` base class implements a common ``fit`` method\n for all the estimators in the module. 
Regression and classification\n only differ from each other in the loss function that is optimized.\n\n- :class:`~sklearn.ensemble.AdaBoostClassifier` implements adaptive boosting\n (AdaBoost-SAMME) for classification problems.\n\n- :class:`~sklearn.ensemble.AdaBoostRegressor` implements adaptive boosting\n (AdaBoost.R2) for regression problems.\n\"\"\"\n\n# Authors: Noel Dawe <[email protected]>\n# Gilles Louppe <[email protected]>\n# Hamzeh Alsalhi <[email protected]>\n# Arnaud Joly <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\n\nfrom scipy.special import xlogy\n\nfrom ._base import BaseEnsemble\nfrom ..base import ClassifierMixin, RegressorMixin, is_classifier, is_regressor\n\nfrom ..tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom ..utils import check_array, check_random_state, _safe_indexing\nfrom ..utils.extmath import softmax\nfrom ..utils.extmath import stable_cumsum\nfrom ..metrics import accuracy_score, r2_score\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import _check_sample_weight\nfrom ..utils.validation import has_fit_parameter\nfrom ..utils.validation import _num_samples\nfrom ..utils.validation import _deprecate_positional_args\n\n__all__ = [\n 'AdaBoostClassifier',\n 'AdaBoostRegressor',\n]\n\n\nclass BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta):\n \"\"\"Base class for AdaBoost estimators.\n\n Warning: This class should not be used directly. Use derived classes\n instead.\n \"\"\"\n\n @abstractmethod\n def __init__(self,\n base_estimator=None, *,\n n_estimators=50,\n estimator_params=tuple(),\n learning_rate=1.,\n random_state=None):\n\n super().__init__(\n base_estimator=base_estimator,\n n_estimators=n_estimators,\n estimator_params=estimator_params)\n\n self.learning_rate = learning_rate\n self.random_state = random_state\n\n def _check_X(self, X):\n return check_array(X, accept_sparse=['csr', 'csc'], ensure_2d=True,\n allow_nd=True, dtype=None)\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Build a boosted classifier/regressor from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n y : array-like of shape (n_samples,)\n The target values (class labels in classification, real numbers in\n regression).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. 
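For illustration only (editor's addition; the estimator choice and variable
names are arbitrary), passing an explicitly uniform ``sample_weight`` should be
equivalent to the default initialization to ``1 / n_samples``:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier

X, y = make_classification(n_samples=200, random_state=0)
uniform = np.full(len(X), 1.0 / len(X))   # what sample_weight=None amounts to

clf_default = AdaBoostClassifier(random_state=0).fit(X, y)
clf_uniform = AdaBoostClassifier(random_state=0).fit(X, y, sample_weight=uniform)
print(np.allclose(clf_default.estimator_weights_, clf_uniform.estimator_weights_))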
If None, the sample weights are initialized to\n 1 / n_samples.\n\n Returns\n -------\n self : object\n \"\"\"\n # Check parameters\n if self.learning_rate <= 0:\n raise ValueError(\"learning_rate must be greater than zero\")\n\n X, y = self._validate_data(X, y,\n accept_sparse=['csr', 'csc'],\n ensure_2d=True,\n allow_nd=True,\n dtype=None,\n y_numeric=is_regressor(self))\n\n sample_weight = _check_sample_weight(sample_weight, X, np.float64)\n sample_weight /= sample_weight.sum()\n if np.any(sample_weight < 0):\n raise ValueError(\"sample_weight cannot contain negative weights\")\n\n # Check parameters\n self._validate_estimator()\n\n # Clear any previous fit results\n self.estimators_ = []\n self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)\n self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)\n\n # Initializion of the random number instance that will be used to\n # generate a seed at each iteration\n random_state = check_random_state(self.random_state)\n\n for iboost in range(self.n_estimators):\n # Boosting step\n sample_weight, estimator_weight, estimator_error = self._boost(\n iboost,\n X, y,\n sample_weight,\n random_state)\n\n # Early termination\n if sample_weight is None:\n break\n\n self.estimator_weights_[iboost] = estimator_weight\n self.estimator_errors_[iboost] = estimator_error\n\n # Stop if error is zero\n if estimator_error == 0:\n break\n\n sample_weight_sum = np.sum(sample_weight)\n\n # Stop if the sum of sample weights has become non-positive\n if sample_weight_sum <= 0:\n break\n\n if iboost < self.n_estimators - 1:\n # Normalize\n sample_weight /= sample_weight_sum\n\n return self\n\n @abstractmethod\n def _boost(self, iboost, X, y, sample_weight, random_state):\n \"\"\"Implement a single boost.\n\n Warning: This method needs to be overridden by subclasses.\n\n Parameters\n ----------\n iboost : int\n The index of the current boost iteration.\n\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n y : array-like of shape (n_samples,)\n The target values (class labels).\n\n sample_weight : array-like of shape (n_samples,)\n The current sample weights.\n\n random_state : RandomState\n The current random number generator\n\n Returns\n -------\n sample_weight : array-like of shape (n_samples,) or None\n The reweighted sample weights.\n If None then boosting has terminated early.\n\n estimator_weight : float\n The weight for the current boost.\n If None then boosting has terminated early.\n\n error : float\n The classification error for the current boost.\n If None then boosting has terminated early.\n \"\"\"\n pass\n\n def staged_score(self, X, y, sample_weight=None):\n \"\"\"Return staged scores for X, y.\n\n This generator method yields the ensemble score after each iteration of\n boosting and therefore allows monitoring, such as to determine the\n score on a test set after each boost.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. 
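A short usage sketch (editor's addition, not part of the upstream docstring;
the dataset and split are arbitrary) of the generator described here,
monitoring a held-out score after each boosting iteration:

from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=400, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = AdaBoostClassifier(n_estimators=20, random_state=0).fit(X_tr, y_tr)

test_scores = list(clf.staged_score(X_te, y_te))   # one accuracy per boost
print(len(test_scores) == len(clf.estimators_), max(test_scores))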
COO, DOK, and LIL are converted to CSR.\n\n y : array-like of shape (n_samples,)\n Labels for X.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Yields\n ------\n z : float\n \"\"\"\n X = self._check_X(X)\n\n for y_pred in self.staged_predict(X):\n if is_classifier(self):\n yield accuracy_score(y, y_pred, sample_weight=sample_weight)\n else:\n yield r2_score(y, y_pred, sample_weight=sample_weight)\n\n @property\n def feature_importances_(self):\n \"\"\"The impurity-based feature importances.\n\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n Returns\n -------\n feature_importances_ : ndarray of shape (n_features,)\n The feature importances.\n \"\"\"\n if self.estimators_ is None or len(self.estimators_) == 0:\n raise ValueError(\"Estimator not fitted, \"\n \"call `fit` before `feature_importances_`.\")\n\n try:\n norm = self.estimator_weights_.sum()\n return (sum(weight * clf.feature_importances_ for weight, clf\n in zip(self.estimator_weights_, self.estimators_))\n / norm)\n\n except AttributeError as e:\n raise AttributeError(\n \"Unable to compute feature importances \"\n \"since base_estimator does not have a \"\n \"feature_importances_ attribute\") from e\n\n\ndef _samme_proba(estimator, n_classes, X):\n \"\"\"Calculate algorithm 4, step 2, equation c) of Zhu et al [1].\n\n References\n ----------\n .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, \"Multi-class AdaBoost\", 2009.\n\n \"\"\"\n proba = estimator.predict_proba(X)\n\n # Displace zero probabilities so the log is defined.\n # Also fix negative elements which may occur with\n # negative sample weights.\n np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)\n log_proba = np.log(proba)\n\n return (n_classes - 1) * (log_proba - (1. / n_classes)\n * log_proba.sum(axis=1)[:, np.newaxis])\n\n\nclass AdaBoostClassifier(ClassifierMixin, BaseWeightBoosting):\n \"\"\"An AdaBoost classifier.\n\n An AdaBoost [1] classifier is a meta-estimator that begins by fitting a\n classifier on the original dataset and then fits additional copies of the\n classifier on the same dataset but where the weights of incorrectly\n classified instances are adjusted such that subsequent classifiers focus\n more on difficult cases.\n\n This class implements the algorithm known as AdaBoost-SAMME [2].\n\n Read more in the :ref:`User Guide <adaboost>`.\n\n .. versionadded:: 0.14\n\n Parameters\n ----------\n base_estimator : object, default=None\n The base estimator from which the boosted ensemble is built.\n Support for sample weighting is required, as well as proper\n ``classes_`` and ``n_classes_`` attributes. If ``None``, then\n the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier`\n initialized with `max_depth=1`.\n\n n_estimators : int, default=50\n The maximum number of estimators at which boosting is terminated.\n In case of perfect fit, the learning procedure is stopped early.\n\n learning_rate : float, default=1.\n Weight applied to each classifier at each boosting iteration. A higher\n learning rate increases the contribution of each classifier. 
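Editor's sketch of the per-estimator SAMME.R contribution computed by
``_samme_proba`` above, h_k(x) = (K - 1) * (log p_k(x) - mean_j log p_j(x));
the probabilities below are made up:

import numpy as np

proba = np.array([[0.7, 0.2, 0.1]])       # one sample, K = 3 classes
K = proba.shape[1]
log_proba = np.log(proba)
contrib = (K - 1) * (log_proba - log_proba.mean(axis=1, keepdims=True))
print(np.round(contrib, 3))               # rows sum to 0; largest entry = likeliest class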
There is\n a trade-off between the `learning_rate` and `n_estimators` parameters.\n\n algorithm : {'SAMME', 'SAMME.R'}, default='SAMME.R'\n If 'SAMME.R' then use the SAMME.R real boosting algorithm.\n ``base_estimator`` must support calculation of class probabilities.\n If 'SAMME' then use the SAMME discrete boosting algorithm.\n The SAMME.R algorithm typically converges faster than SAMME,\n achieving a lower test error with fewer boosting iterations.\n\n random_state : int, RandomState instance or None, default=None\n Controls the random seed given at each `base_estimator` at each\n boosting iteration.\n Thus, it is only used when `base_estimator` exposes a `random_state`.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n base_estimator_ : estimator\n The base estimator from which the ensemble is grown.\n\n estimators_ : list of classifiers\n The collection of fitted sub-estimators.\n\n classes_ : ndarray of shape (n_classes,)\n The classes labels.\n\n n_classes_ : int\n The number of classes.\n\n estimator_weights_ : ndarray of floats\n Weights for each estimator in the boosted ensemble.\n\n estimator_errors_ : ndarray of floats\n Classification error for each estimator in the boosted\n ensemble.\n\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances if supported by the\n ``base_estimator`` (when based on decision trees).\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n See Also\n --------\n AdaBoostRegressor : An AdaBoost regressor that begins by fitting a\n regressor on the original dataset and then fits additional copies of\n the regressor on the same dataset but where the weights of instances\n are adjusted according to the error of the current prediction.\n\n GradientBoostingClassifier : GB builds an additive model in a forward\n stage-wise fashion. Regression trees are fit on the negative gradient\n of the binomial or multinomial deviance loss function. Binary\n classification is a special case where only a single regression tree is\n induced.\n\n sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning\n method used for classification.\n Creates a model that predicts the value of a target variable by\n learning simple decision rules inferred from the data features.\n\n References\n ----------\n .. [1] Y. Freund, R. Schapire, \"A Decision-Theoretic Generalization of\n on-Line Learning and an Application to Boosting\", 1995.\n\n .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, \"Multi-class AdaBoost\", 2009.\n\n Examples\n --------\n >>> from sklearn.ensemble import AdaBoostClassifier\n >>> from sklearn.datasets import make_classification\n >>> X, y = make_classification(n_samples=1000, n_features=4,\n ... n_informative=2, n_redundant=0,\n ... 
random_state=0, shuffle=False)\n >>> clf = AdaBoostClassifier(n_estimators=100, random_state=0)\n >>> clf.fit(X, y)\n AdaBoostClassifier(n_estimators=100, random_state=0)\n >>> clf.predict([[0, 0, 0, 0]])\n array([1])\n >>> clf.score(X, y)\n 0.983...\n \"\"\"\n @_deprecate_positional_args\n def __init__(self,\n base_estimator=None, *,\n n_estimators=50,\n learning_rate=1.,\n algorithm='SAMME.R',\n random_state=None):\n\n super().__init__(\n base_estimator=base_estimator,\n n_estimators=n_estimators,\n learning_rate=learning_rate,\n random_state=random_state)\n\n self.algorithm = algorithm\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Build a boosted classifier from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n y : array-like of shape (n_samples,)\n The target values (class labels).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, the sample weights are initialized to\n ``1 / n_samples``.\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n # Check that algorithm is supported\n if self.algorithm not in ('SAMME', 'SAMME.R'):\n raise ValueError(\"algorithm %s is not supported\" % self.algorithm)\n\n # Fit\n return super().fit(X, y, sample_weight)\n\n def _validate_estimator(self):\n \"\"\"Check the estimator and set the base_estimator_ attribute.\"\"\"\n super()._validate_estimator(\n default=DecisionTreeClassifier(max_depth=1))\n\n # SAMME-R requires predict_proba-enabled base estimators\n if self.algorithm == 'SAMME.R':\n if not hasattr(self.base_estimator_, 'predict_proba'):\n raise TypeError(\n \"AdaBoostClassifier with algorithm='SAMME.R' requires \"\n \"that the weak learner supports the calculation of class \"\n \"probabilities with a predict_proba method.\\n\"\n \"Please change the base estimator or set \"\n \"algorithm='SAMME' instead.\")\n if not has_fit_parameter(self.base_estimator_, \"sample_weight\"):\n raise ValueError(\"%s doesn't support sample_weight.\"\n % self.base_estimator_.__class__.__name__)\n\n def _boost(self, iboost, X, y, sample_weight, random_state):\n \"\"\"Implement a single boost.\n\n Perform a single boost according to the real multi-class SAMME.R\n algorithm or to the discrete SAMME algorithm and return the updated\n sample weights.\n\n Parameters\n ----------\n iboost : int\n The index of the current boost iteration.\n\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like of shape (n_samples,)\n The target values (class labels).\n\n sample_weight : array-like of shape (n_samples,)\n The current sample weights.\n\n random_state : RandomState instance\n The RandomState instance used if the base estimator accepts a\n `random_state` attribute.\n\n Returns\n -------\n sample_weight : array-like of shape (n_samples,) or None\n The reweighted sample weights.\n If None then boosting has terminated early.\n\n estimator_weight : float\n The weight for the current boost.\n If None then boosting has terminated early.\n\n estimator_error : float\n The classification error for the current boost.\n If None then boosting has terminated early.\n \"\"\"\n if self.algorithm == 'SAMME.R':\n return self._boost_real(iboost, X, y, sample_weight, random_state)\n\n else: # elif self.algorithm == \"SAMME\":\n return self._boost_discrete(iboost, X, y, 
sample_weight,\n random_state)\n\n def _boost_real(self, iboost, X, y, sample_weight, random_state):\n \"\"\"Implement a single boost using the SAMME.R real algorithm.\"\"\"\n estimator = self._make_estimator(random_state=random_state)\n\n estimator.fit(X, y, sample_weight=sample_weight)\n\n y_predict_proba = estimator.predict_proba(X)\n\n if iboost == 0:\n self.classes_ = getattr(estimator, 'classes_', None)\n self.n_classes_ = len(self.classes_)\n\n y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),\n axis=0)\n\n # Instances incorrectly classified\n incorrect = y_predict != y\n\n # Error fraction\n estimator_error = np.mean(\n np.average(incorrect, weights=sample_weight, axis=0))\n\n # Stop if classification is perfect\n if estimator_error <= 0:\n return sample_weight, 1., 0.\n\n # Construct y coding as described in Zhu et al [2]:\n #\n # y_k = 1 if c == k else -1 / (K - 1)\n #\n # where K == n_classes_ and c, k in [0, K) are indices along the second\n # axis of the y coding with c being the index corresponding to the true\n # class label.\n n_classes = self.n_classes_\n classes = self.classes_\n y_codes = np.array([-1. / (n_classes - 1), 1.])\n y_coding = y_codes.take(classes == y[:, np.newaxis])\n\n # Displace zero probabilities so the log is defined.\n # Also fix negative elements which may occur with\n # negative sample weights.\n proba = y_predict_proba # alias for readability\n np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)\n\n # Boost weight using multi-class AdaBoost SAMME.R alg\n estimator_weight = (-1. * self.learning_rate\n * ((n_classes - 1.) / n_classes)\n * xlogy(y_coding, y_predict_proba).sum(axis=1))\n\n # Only boost the weights if it will fit again\n if not iboost == self.n_estimators - 1:\n # Only boost positive weights\n sample_weight *= np.exp(estimator_weight *\n ((sample_weight > 0) |\n (estimator_weight < 0)))\n\n return sample_weight, 1., estimator_error\n\n def _boost_discrete(self, iboost, X, y, sample_weight, random_state):\n \"\"\"Implement a single boost using the SAMME discrete algorithm.\"\"\"\n estimator = self._make_estimator(random_state=random_state)\n\n estimator.fit(X, y, sample_weight=sample_weight)\n\n y_predict = estimator.predict(X)\n\n if iboost == 0:\n self.classes_ = getattr(estimator, 'classes_', None)\n self.n_classes_ = len(self.classes_)\n\n # Instances incorrectly classified\n incorrect = y_predict != y\n\n # Error fraction\n estimator_error = np.mean(\n np.average(incorrect, weights=sample_weight, axis=0))\n\n # Stop if classification is perfect\n if estimator_error <= 0:\n return sample_weight, 1., 0.\n\n n_classes = self.n_classes_\n\n # Stop if the error is at least as bad as random guessing\n if estimator_error >= 1. - (1. / n_classes):\n self.estimators_.pop(-1)\n if len(self.estimators_) == 0:\n raise ValueError('BaseClassifier in AdaBoostClassifier '\n 'ensemble is worse than random, ensemble '\n 'can not be fit.')\n return None, None, None\n\n # Boost weight using multi-class AdaBoost SAMME alg\n estimator_weight = self.learning_rate * (\n np.log((1. 
- estimator_error) / estimator_error) +\n np.log(n_classes - 1.))\n\n # Only boost the weights if I will fit again\n if not iboost == self.n_estimators - 1:\n # Only boost positive weights\n sample_weight *= np.exp(estimator_weight * incorrect *\n (sample_weight > 0))\n\n return sample_weight, estimator_weight, estimator_error\n\n def predict(self, X):\n \"\"\"Predict classes for X.\n\n The predicted class of an input sample is computed as the weighted mean\n prediction of the classifiers in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted classes.\n \"\"\"\n X = self._check_X(X)\n\n pred = self.decision_function(X)\n\n if self.n_classes_ == 2:\n return self.classes_.take(pred > 0, axis=0)\n\n return self.classes_.take(np.argmax(pred, axis=1), axis=0)\n\n def staged_predict(self, X):\n \"\"\"Return staged predictions for X.\n\n The predicted class of an input sample is computed as the weighted mean\n prediction of the classifiers in the ensemble.\n\n This generator method yields the ensemble prediction after each\n iteration of boosting and therefore allows monitoring, such as to\n determine the prediction on a test set after each boost.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Yields\n ------\n y : generator of ndarray of shape (n_samples,)\n The predicted classes.\n \"\"\"\n X = self._check_X(X)\n\n n_classes = self.n_classes_\n classes = self.classes_\n\n if n_classes == 2:\n for pred in self.staged_decision_function(X):\n yield np.array(classes.take(pred > 0, axis=0))\n\n else:\n for pred in self.staged_decision_function(X):\n yield np.array(classes.take(\n np.argmax(pred, axis=1), axis=0))\n\n def decision_function(self, X):\n \"\"\"Compute the decision function of ``X``.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n score : ndarray of shape of (n_samples, k)\n The decision function of the input samples. The order of\n outputs is the same of that of the :term:`classes_` attribute.\n Binary classification is a special cases with ``k == 1``,\n otherwise ``k==n_classes``. For binary classification,\n values closer to -1 or 1 mean more like the first or second\n class in ``classes_``, respectively.\n \"\"\"\n check_is_fitted(self)\n X = self._check_X(X)\n\n n_classes = self.n_classes_\n classes = self.classes_[:, np.newaxis]\n\n if self.algorithm == 'SAMME.R':\n # The weights are all 1. for SAMME.R\n pred = sum(_samme_proba(estimator, n_classes, X)\n for estimator in self.estimators_)\n else: # self.algorithm == \"SAMME\"\n pred = sum((estimator.predict(X) == classes).T * w\n for estimator, w in zip(self.estimators_,\n self.estimator_weights_))\n\n pred /= self.estimator_weights_.sum()\n if n_classes == 2:\n pred[:, 0] *= -1\n return pred.sum(axis=1)\n return pred\n\n def staged_decision_function(self, X):\n \"\"\"Compute decision function of ``X`` for each boosting iteration.\n\n This method allows monitoring (i.e. 
determine error on testing set)\n after each boosting iteration.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Yields\n ------\n score : generator of ndarray of shape (n_samples, k)\n The decision function of the input samples. The order of\n outputs is the same of that of the :term:`classes_` attribute.\n Binary classification is a special cases with ``k == 1``,\n otherwise ``k==n_classes``. For binary classification,\n values closer to -1 or 1 mean more like the first or second\n class in ``classes_``, respectively.\n \"\"\"\n check_is_fitted(self)\n X = self._check_X(X)\n\n n_classes = self.n_classes_\n classes = self.classes_[:, np.newaxis]\n pred = None\n norm = 0.\n\n for weight, estimator in zip(self.estimator_weights_,\n self.estimators_):\n norm += weight\n\n if self.algorithm == 'SAMME.R':\n # The weights are all 1. for SAMME.R\n current_pred = _samme_proba(estimator, n_classes, X)\n else: # elif self.algorithm == \"SAMME\":\n current_pred = estimator.predict(X)\n current_pred = (current_pred == classes).T * weight\n\n if pred is None:\n pred = current_pred\n else:\n pred += current_pred\n\n if n_classes == 2:\n tmp_pred = np.copy(pred)\n tmp_pred[:, 0] *= -1\n yield (tmp_pred / norm).sum(axis=1)\n else:\n yield pred / norm\n\n @staticmethod\n def _compute_proba_from_decision(decision, n_classes):\n \"\"\"Compute probabilities from the decision function.\n\n This is based eq. (4) of [1] where:\n p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X)))\n = softmax((1 / K-1) * f(X))\n\n References\n ----------\n .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, \"Multi-class AdaBoost\",\n 2009.\n \"\"\"\n if n_classes == 2:\n decision = np.vstack([-decision, decision]).T / 2\n else:\n decision /= (n_classes - 1)\n return softmax(decision, copy=False)\n\n def predict_proba(self, X):\n \"\"\"Predict class probabilities for X.\n\n The predicted class probabilities of an input sample is computed as\n the weighted mean predicted class probabilities of the classifiers\n in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes)\n The class probabilities of the input samples. The order of\n outputs is the same of that of the :term:`classes_` attribute.\n \"\"\"\n check_is_fitted(self)\n X = self._check_X(X)\n\n n_classes = self.n_classes_\n\n if n_classes == 1:\n return np.ones((_num_samples(X), 1))\n\n decision = self.decision_function(X)\n return self._compute_proba_from_decision(decision, n_classes)\n\n def staged_predict_proba(self, X):\n \"\"\"Predict class probabilities for X.\n\n The predicted class probabilities of an input sample is computed as\n the weighted mean predicted class probabilities of the classifiers\n in the ensemble.\n\n This generator method yields the ensemble predicted class probabilities\n after each iteration of boosting and therefore allows monitoring, such\n as to determine the predicted class probabilities on a test set after\n each boost.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. 
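Editor's sketch of the decision-to-probability mapping implemented by
``_compute_proba_from_decision`` above (multiclass: softmax of f(X)/(K - 1);
binary: softmax over the stacked pair of -f(X)/2 and f(X)/2), using made-up
decision values:

import numpy as np

def softmax_rows(z):
    z = z - z.max(axis=1, keepdims=True)   # subtract row max for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

decision = np.array([-1.5, 0.2])           # binary decision values, shape (n_samples,)
proba = softmax_rows(np.vstack([-decision, decision]).T / 2)
print(np.round(proba, 3))                  # each row sums to 1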
COO, DOK, and LIL are converted to CSR.\n\n Yields\n -------\n p : generator of ndarray of shape (n_samples,)\n The class probabilities of the input samples. The order of\n outputs is the same of that of the :term:`classes_` attribute.\n \"\"\"\n X = self._check_X(X)\n\n n_classes = self.n_classes_\n\n for decision in self.staged_decision_function(X):\n yield self._compute_proba_from_decision(decision, n_classes)\n\n def predict_log_proba(self, X):\n \"\"\"Predict class log-probabilities for X.\n\n The predicted class log-probabilities of an input sample is computed as\n the weighted mean predicted class log-probabilities of the classifiers\n in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes)\n The class probabilities of the input samples. The order of\n outputs is the same of that of the :term:`classes_` attribute.\n \"\"\"\n X = self._check_X(X)\n return np.log(self.predict_proba(X))\n\n\nclass AdaBoostRegressor(RegressorMixin, BaseWeightBoosting):\n \"\"\"An AdaBoost regressor.\n\n An AdaBoost [1] regressor is a meta-estimator that begins by fitting a\n regressor on the original dataset and then fits additional copies of the\n regressor on the same dataset but where the weights of instances are\n adjusted according to the error of the current prediction. As such,\n subsequent regressors focus more on difficult cases.\n\n This class implements the algorithm known as AdaBoost.R2 [2].\n\n Read more in the :ref:`User Guide <adaboost>`.\n\n .. versionadded:: 0.14\n\n Parameters\n ----------\n base_estimator : object, default=None\n The base estimator from which the boosted ensemble is built.\n If ``None``, then the base estimator is\n :class:`~sklearn.tree.DecisionTreeRegressor` initialized with\n `max_depth=3`.\n\n n_estimators : int, default=50\n The maximum number of estimators at which boosting is terminated.\n In case of perfect fit, the learning procedure is stopped early.\n\n learning_rate : float, default=1.\n Weight applied to each classifier at each boosting iteration. A higher\n learning rate increases the contribution of each classifier. 
There is\n a trade-off between the `learning_rate` and `n_estimators` parameters.\n\n loss : {'linear', 'square', 'exponential'}, default='linear'\n The loss function to use when updating the weights after each\n boosting iteration.\n\n random_state : int, RandomState instance or None, default=None\n Controls the random seed given at each `base_estimator` at each\n boosting iteration.\n Thus, it is only used when `base_estimator` exposes a `random_state`.\n In addition, it controls the bootstrap of the weights used to train the\n `base_estimator` at each boosting iteration.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n base_estimator_ : estimator\n The base estimator from which the ensemble is grown.\n\n estimators_ : list of classifiers\n The collection of fitted sub-estimators.\n\n estimator_weights_ : ndarray of floats\n Weights for each estimator in the boosted ensemble.\n\n estimator_errors_ : ndarray of floats\n Regression error for each estimator in the boosted ensemble.\n\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances if supported by the\n ``base_estimator`` (when based on decision trees).\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n Examples\n --------\n >>> from sklearn.ensemble import AdaBoostRegressor\n >>> from sklearn.datasets import make_regression\n >>> X, y = make_regression(n_features=4, n_informative=2,\n ... random_state=0, shuffle=False)\n >>> regr = AdaBoostRegressor(random_state=0, n_estimators=100)\n >>> regr.fit(X, y)\n AdaBoostRegressor(n_estimators=100, random_state=0)\n >>> regr.predict([[0, 0, 0, 0]])\n array([4.7972...])\n >>> regr.score(X, y)\n 0.9771...\n\n See Also\n --------\n AdaBoostClassifier, GradientBoostingRegressor,\n sklearn.tree.DecisionTreeRegressor\n\n References\n ----------\n .. [1] Y. Freund, R. Schapire, \"A Decision-Theoretic Generalization of\n on-Line Learning and an Application to Boosting\", 1995.\n\n .. [2] H. Drucker, \"Improving Regressors using Boosting Techniques\", 1997.\n\n \"\"\"\n @_deprecate_positional_args\n def __init__(self,\n base_estimator=None, *,\n n_estimators=50,\n learning_rate=1.,\n loss='linear',\n random_state=None):\n\n super().__init__(\n base_estimator=base_estimator,\n n_estimators=n_estimators,\n learning_rate=learning_rate,\n random_state=random_state)\n\n self.loss = loss\n self.random_state = random_state\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Build a boosted regressor from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n y : array-like of shape (n_samples,)\n The target values (real numbers).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. 
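Editor's sketch (illustrative numbers only) of the AdaBoost.R2 update
implemented by ``_boost`` below: absolute errors are scaled to [0, 1], averaged
with the sample weights, and well-predicted samples are down-weighted by
beta ** (1 - loss):

import numpy as np

sample_weight = np.full(4, 0.25)
abs_err = np.array([0.0, 1.0, 2.0, 4.0])         # |y_pred - y| for four samples
loss = abs_err / abs_err.max()                   # 'linear' loss, now in [0, 1]
estimator_error = (sample_weight * loss).sum()   # weighted average loss = 0.4375
beta = estimator_error / (1.0 - estimator_error)
new_weight = sample_weight * beta ** (1.0 - loss)   # learning_rate assumed to be 1
new_weight /= new_weight.sum()                      # renormalized as in fit()
print(np.round(new_weight, 3))                   # accurate samples lose relative weight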
If None, the sample weights are initialized to\n 1 / n_samples.\n\n Returns\n -------\n self : object\n \"\"\"\n # Check loss\n if self.loss not in ('linear', 'square', 'exponential'):\n raise ValueError(\n \"loss must be 'linear', 'square', or 'exponential'\")\n\n # Fit\n return super().fit(X, y, sample_weight)\n\n def _validate_estimator(self):\n \"\"\"Check the estimator and set the base_estimator_ attribute.\"\"\"\n super()._validate_estimator(\n default=DecisionTreeRegressor(max_depth=3))\n\n def _boost(self, iboost, X, y, sample_weight, random_state):\n \"\"\"Implement a single boost for regression\n\n Perform a single boost according to the AdaBoost.R2 algorithm and\n return the updated sample weights.\n\n Parameters\n ----------\n iboost : int\n The index of the current boost iteration.\n\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like of shape (n_samples,)\n The target values (class labels in classification, real numbers in\n regression).\n\n sample_weight : array-like of shape (n_samples,)\n The current sample weights.\n\n random_state : RandomState\n The RandomState instance used if the base estimator accepts a\n `random_state` attribute.\n Controls also the bootstrap of the weights used to train the weak\n learner.\n replacement.\n\n Returns\n -------\n sample_weight : array-like of shape (n_samples,) or None\n The reweighted sample weights.\n If None then boosting has terminated early.\n\n estimator_weight : float\n The weight for the current boost.\n If None then boosting has terminated early.\n\n estimator_error : float\n The regression error for the current boost.\n If None then boosting has terminated early.\n \"\"\"\n estimator = self._make_estimator(random_state=random_state)\n\n # Weighted sampling of the training set with replacement\n bootstrap_idx = random_state.choice(\n np.arange(_num_samples(X)), size=_num_samples(X), replace=True,\n p=sample_weight\n )\n\n # Fit on the bootstrapped sample and obtain a prediction\n # for all samples in the training set\n X_ = _safe_indexing(X, bootstrap_idx)\n y_ = _safe_indexing(y, bootstrap_idx)\n estimator.fit(X_, y_)\n y_predict = estimator.predict(X)\n\n error_vect = np.abs(y_predict - y)\n sample_mask = sample_weight > 0\n masked_sample_weight = sample_weight[sample_mask]\n masked_error_vector = error_vect[sample_mask]\n\n error_max = masked_error_vector.max()\n if error_max != 0:\n masked_error_vector /= error_max\n\n if self.loss == 'square':\n masked_error_vector **= 2\n elif self.loss == 'exponential':\n masked_error_vector = 1. - np.exp(-masked_error_vector)\n\n # Calculate the average loss\n estimator_error = (masked_sample_weight * masked_error_vector).sum()\n\n if estimator_error <= 0:\n # Stop if fit is perfect\n return sample_weight, 1., 0.\n\n elif estimator_error >= 0.5:\n # Discard current estimator only if it isn't the only one\n if len(self.estimators_) > 1:\n self.estimators_.pop(-1)\n return None, None, None\n\n beta = estimator_error / (1. - estimator_error)\n\n # Boost weight using AdaBoost.R2 alg\n estimator_weight = self.learning_rate * np.log(1. / beta)\n\n if not iboost == self.n_estimators - 1:\n sample_weight[sample_mask] *= np.power(\n beta, (1. 
- masked_error_vector) * self.learning_rate\n )\n\n return sample_weight, estimator_weight, estimator_error\n\n def _get_median_predict(self, X, limit):\n # Evaluate predictions of all estimators\n predictions = np.array([\n est.predict(X) for est in self.estimators_[:limit]]).T\n\n # Sort the predictions\n sorted_idx = np.argsort(predictions, axis=1)\n\n # Find index of median prediction for each sample\n weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)\n median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]\n median_idx = median_or_above.argmax(axis=1)\n\n median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx]\n\n # Return median predictions\n return predictions[np.arange(_num_samples(X)), median_estimators]\n\n def predict(self, X):\n \"\"\"Predict regression value for X.\n\n The predicted regression value of an input sample is computed\n as the weighted median prediction of the classifiers in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted regression values.\n \"\"\"\n check_is_fitted(self)\n X = self._check_X(X)\n\n return self._get_median_predict(X, len(self.estimators_))\n\n def staged_predict(self, X):\n \"\"\"Return staged predictions for X.\n\n The predicted regression value of an input sample is computed\n as the weighted median prediction of the classifiers in the ensemble.\n\n This generator method yields the ensemble prediction after each\n iteration of boosting and therefore allows monitoring, such as to\n determine the prediction on a test set after each boost.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples.\n\n Yields\n -------\n y : generator of ndarray of shape (n_samples,)\n The predicted regression values.\n \"\"\"\n check_is_fitted(self)\n X = self._check_X(X)\n\n for i, _ in enumerate(self.estimators_, 1):\n yield self._get_median_predict(X, limit=i)\n", "\"\"\"\nThis module gathers tree-based methods, including decision, regression and\nrandomized trees. 
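Editor's note: the weighted-median rule used by ``_get_median_predict`` above
can be reproduced in a few lines of NumPy (illustrative numbers; one sample and
three estimators):

import numpy as np

predictions = np.array([1.0, 3.0, 10.0])   # per-estimator predictions for one sample
weights = np.array([0.2, 0.5, 0.3])        # estimator_weights_

order = np.argsort(predictions)
cdf = np.cumsum(weights[order])
median_idx = np.searchsorted(cdf, 0.5 * cdf[-1])   # first index reaching half the mass
print(predictions[order][median_idx])              # 3.0, the weighted median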
Single and multi-output problems are both handled.\n\"\"\"\n\n# Authors: Gilles Louppe <[email protected]>\n# Peter Prettenhofer <[email protected]>\n# Brian Holt <[email protected]>\n# Noel Dawe <[email protected]>\n# Satrajit Gosh <[email protected]>\n# Joly Arnaud <[email protected]>\n# Fares Hedayati <[email protected]>\n# Nelson Liu <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numbers\nimport warnings\nimport copy\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom math import ceil\n\nimport numpy as np\nfrom scipy.sparse import issparse\n\nfrom ..base import BaseEstimator\nfrom ..base import ClassifierMixin\nfrom ..base import clone\nfrom ..base import RegressorMixin\nfrom ..base import is_classifier\nfrom ..base import MultiOutputMixin\nfrom ..utils import Bunch\nfrom ..utils import check_random_state\nfrom ..utils.validation import _check_sample_weight\nfrom ..utils import compute_sample_weight\nfrom ..utils.multiclass import check_classification_targets\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import _deprecate_positional_args\n\nfrom ._criterion import Criterion\nfrom ._splitter import Splitter\nfrom ._tree import DepthFirstTreeBuilder\nfrom ._tree import BestFirstTreeBuilder\nfrom ._tree import Tree\nfrom ._tree import _build_pruned_tree_ccp\nfrom ._tree import ccp_pruning_path\nfrom . import _tree, _splitter, _criterion\n\n__all__ = [\"DecisionTreeClassifier\",\n \"DecisionTreeRegressor\",\n \"ExtraTreeClassifier\",\n \"ExtraTreeRegressor\"]\n\n\n# =============================================================================\n# Types and constants\n# =============================================================================\n\nDTYPE = _tree.DTYPE\nDOUBLE = _tree.DOUBLE\n\nCRITERIA_CLF = {\"gini\": _criterion.Gini,\n \"entropy\": _criterion.Entropy}\nCRITERIA_REG = {\"mse\": _criterion.MSE,\n \"friedman_mse\": _criterion.FriedmanMSE,\n \"mae\": _criterion.MAE,\n \"poisson\": _criterion.Poisson}\n\nDENSE_SPLITTERS = {\"best\": _splitter.BestSplitter,\n \"random\": _splitter.RandomSplitter}\n\nSPARSE_SPLITTERS = {\"best\": _splitter.BestSparseSplitter,\n \"random\": _splitter.RandomSparseSplitter}\n\n# =============================================================================\n# Base decision tree\n# =============================================================================\n\n\nclass BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):\n \"\"\"Base class for decision trees.\n\n Warning: This class should not be used directly.\n Use derived classes instead.\n \"\"\"\n\n @abstractmethod\n @_deprecate_positional_args\n def __init__(self, *,\n criterion,\n splitter,\n max_depth,\n min_samples_split,\n min_samples_leaf,\n min_weight_fraction_leaf,\n max_features,\n max_leaf_nodes,\n random_state,\n min_impurity_decrease,\n min_impurity_split,\n class_weight=None,\n ccp_alpha=0.0):\n self.criterion = criterion\n self.splitter = splitter\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.max_features = max_features\n self.max_leaf_nodes = max_leaf_nodes\n self.random_state = random_state\n self.min_impurity_decrease = min_impurity_decrease\n self.min_impurity_split = min_impurity_split\n self.class_weight = class_weight\n self.ccp_alpha = ccp_alpha\n\n def get_depth(self):\n \"\"\"Return the depth of the decision tree.\n\n The depth of a tree is the maximum distance between 
the root\n and any leaf.\n\n Returns\n -------\n self.tree_.max_depth : int\n The maximum depth of the tree.\n \"\"\"\n check_is_fitted(self)\n return self.tree_.max_depth\n\n def get_n_leaves(self):\n \"\"\"Return the number of leaves of the decision tree.\n\n Returns\n -------\n self.tree_.n_leaves : int\n Number of leaves.\n \"\"\"\n check_is_fitted(self)\n return self.tree_.n_leaves\n\n def fit(self, X, y, sample_weight=None, check_input=True,\n X_idx_sorted=\"deprecated\"):\n\n random_state = check_random_state(self.random_state)\n\n if self.ccp_alpha < 0.0:\n raise ValueError(\"ccp_alpha must be greater than or equal to 0\")\n\n if check_input:\n # Need to validate separately here.\n # We can't pass multi_ouput=True because that would allow y to be\n # csr.\n check_X_params = dict(dtype=DTYPE, accept_sparse=\"csc\")\n check_y_params = dict(ensure_2d=False, dtype=None)\n X, y = self._validate_data(X, y,\n validate_separately=(check_X_params,\n check_y_params))\n if issparse(X):\n X.sort_indices()\n\n if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:\n raise ValueError(\"No support for np.int64 index based \"\n \"sparse matrices\")\n\n if self.criterion == \"poisson\":\n if np.any(y < 0):\n raise ValueError(\"Some value(s) of y are negative which is\"\n \" not allowed for Poisson regression.\")\n if np.sum(y) <= 0:\n raise ValueError(\"Sum of y is not positive which is \"\n \"necessary for Poisson regression.\")\n\n # Determine output settings\n n_samples, self.n_features_ = X.shape\n self.n_features_in_ = self.n_features_\n is_classification = is_classifier(self)\n\n y = np.atleast_1d(y)\n expanded_class_weight = None\n\n if y.ndim == 1:\n # reshape is necessary to preserve the data contiguity against vs\n # [:, np.newaxis] that does not.\n y = np.reshape(y, (-1, 1))\n\n self.n_outputs_ = y.shape[1]\n\n if is_classification:\n check_classification_targets(y)\n y = np.copy(y)\n\n self.classes_ = []\n self.n_classes_ = []\n\n if self.class_weight is not None:\n y_original = np.copy(y)\n\n y_encoded = np.zeros(y.shape, dtype=int)\n for k in range(self.n_outputs_):\n classes_k, y_encoded[:, k] = np.unique(y[:, k],\n return_inverse=True)\n self.classes_.append(classes_k)\n self.n_classes_.append(classes_k.shape[0])\n y = y_encoded\n\n if self.class_weight is not None:\n expanded_class_weight = compute_sample_weight(\n self.class_weight, y_original)\n\n self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)\n\n if getattr(y, \"dtype\", None) != DOUBLE or not y.flags.contiguous:\n y = np.ascontiguousarray(y, dtype=DOUBLE)\n\n # Check parameters\n max_depth = (np.iinfo(np.int32).max if self.max_depth is None\n else self.max_depth)\n max_leaf_nodes = (-1 if self.max_leaf_nodes is None\n else self.max_leaf_nodes)\n\n if isinstance(self.min_samples_leaf, numbers.Integral):\n if not 1 <= self.min_samples_leaf:\n raise ValueError(\"min_samples_leaf must be at least 1 \"\n \"or in (0, 0.5], got %s\"\n % self.min_samples_leaf)\n min_samples_leaf = self.min_samples_leaf\n else: # float\n if not 0. 
< self.min_samples_leaf <= 0.5:\n raise ValueError(\"min_samples_leaf must be at least 1 \"\n \"or in (0, 0.5], got %s\"\n % self.min_samples_leaf)\n min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))\n\n if isinstance(self.min_samples_split, numbers.Integral):\n if not 2 <= self.min_samples_split:\n raise ValueError(\"min_samples_split must be an integer \"\n \"greater than 1 or a float in (0.0, 1.0]; \"\n \"got the integer %s\"\n % self.min_samples_split)\n min_samples_split = self.min_samples_split\n else: # float\n if not 0. < self.min_samples_split <= 1.:\n raise ValueError(\"min_samples_split must be an integer \"\n \"greater than 1 or a float in (0.0, 1.0]; \"\n \"got the float %s\"\n % self.min_samples_split)\n min_samples_split = int(ceil(self.min_samples_split * n_samples))\n min_samples_split = max(2, min_samples_split)\n\n min_samples_split = max(min_samples_split, 2 * min_samples_leaf)\n\n if isinstance(self.max_features, str):\n if self.max_features == \"auto\":\n if is_classification:\n max_features = max(1, int(np.sqrt(self.n_features_)))\n else:\n max_features = self.n_features_\n elif self.max_features == \"sqrt\":\n max_features = max(1, int(np.sqrt(self.n_features_)))\n elif self.max_features == \"log2\":\n max_features = max(1, int(np.log2(self.n_features_)))\n else:\n raise ValueError(\"Invalid value for max_features. \"\n \"Allowed string values are 'auto', \"\n \"'sqrt' or 'log2'.\")\n elif self.max_features is None:\n max_features = self.n_features_\n elif isinstance(self.max_features, numbers.Integral):\n max_features = self.max_features\n else: # float\n if self.max_features > 0.0:\n max_features = max(1,\n int(self.max_features * self.n_features_))\n else:\n max_features = 0\n\n self.max_features_ = max_features\n\n if len(y) != n_samples:\n raise ValueError(\"Number of labels=%d does not match \"\n \"number of samples=%d\" % (len(y), n_samples))\n if not 0 <= self.min_weight_fraction_leaf <= 0.5:\n raise ValueError(\"min_weight_fraction_leaf must in [0, 0.5]\")\n if max_depth <= 0:\n raise ValueError(\"max_depth must be greater than zero. \")\n if not (0 < max_features <= self.n_features_):\n raise ValueError(\"max_features must be in (0, n_features]\")\n if not isinstance(max_leaf_nodes, numbers.Integral):\n raise ValueError(\"max_leaf_nodes must be integral number but was \"\n \"%r\" % max_leaf_nodes)\n if -1 < max_leaf_nodes < 2:\n raise ValueError((\"max_leaf_nodes {0} must be either None \"\n \"or larger than 1\").format(max_leaf_nodes))\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X, DOUBLE)\n\n if expanded_class_weight is not None:\n if sample_weight is not None:\n sample_weight = sample_weight * expanded_class_weight\n else:\n sample_weight = expanded_class_weight\n\n # Set min_weight_leaf from min_weight_fraction_leaf\n if sample_weight is None:\n min_weight_leaf = (self.min_weight_fraction_leaf *\n n_samples)\n else:\n min_weight_leaf = (self.min_weight_fraction_leaf *\n np.sum(sample_weight))\n\n min_impurity_split = self.min_impurity_split\n if min_impurity_split is not None:\n warnings.warn(\n \"The min_impurity_split parameter is deprecated. Its default \"\n \"value has changed from 1e-7 to 0 in version 0.23, and it \"\n \"will be removed in 1.0 (renaming of 0.25). 
Use the \"\n \"min_impurity_decrease parameter instead.\",\n FutureWarning\n )\n\n if min_impurity_split < 0.:\n raise ValueError(\"min_impurity_split must be greater than \"\n \"or equal to 0\")\n else:\n min_impurity_split = 0\n\n if self.min_impurity_decrease < 0.:\n raise ValueError(\"min_impurity_decrease must be greater than \"\n \"or equal to 0\")\n\n # TODO: Remove in 1.1\n if X_idx_sorted != \"deprecated\":\n warnings.warn(\n \"The parameter 'X_idx_sorted' is deprecated and has no \"\n \"effect. It will be removed in 1.1 (renaming of 0.26). You \"\n \"can suppress this warning by not passing any value to the \"\n \"'X_idx_sorted' parameter.\",\n FutureWarning\n )\n\n # Build tree\n criterion = self.criterion\n if not isinstance(criterion, Criterion):\n if is_classification:\n criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,\n self.n_classes_)\n else:\n criterion = CRITERIA_REG[self.criterion](self.n_outputs_,\n n_samples)\n else:\n # Make a deepcopy in case the criterion has mutable attributes that\n # might be shared and modified concurrently during parallel fitting\n criterion = copy.deepcopy(criterion)\n\n SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS\n\n splitter = self.splitter\n if not isinstance(self.splitter, Splitter):\n splitter = SPLITTERS[self.splitter](criterion,\n self.max_features_,\n min_samples_leaf,\n min_weight_leaf,\n random_state)\n\n if is_classifier(self):\n self.tree_ = Tree(self.n_features_,\n self.n_classes_, self.n_outputs_)\n else:\n self.tree_ = Tree(self.n_features_,\n # TODO: tree should't need this in this case\n np.array([1] * self.n_outputs_, dtype=np.intp),\n self.n_outputs_)\n\n # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise\n if max_leaf_nodes < 0:\n builder = DepthFirstTreeBuilder(splitter, min_samples_split,\n min_samples_leaf,\n min_weight_leaf,\n max_depth,\n self.min_impurity_decrease,\n min_impurity_split)\n else:\n builder = BestFirstTreeBuilder(splitter, min_samples_split,\n min_samples_leaf,\n min_weight_leaf,\n max_depth,\n max_leaf_nodes,\n self.min_impurity_decrease,\n min_impurity_split)\n\n builder.build(self.tree_, X, y, sample_weight)\n\n if self.n_outputs_ == 1 and is_classifier(self):\n self.n_classes_ = self.n_classes_[0]\n self.classes_ = self.classes_[0]\n\n self._prune_tree()\n\n return self\n\n def _validate_X_predict(self, X, check_input):\n \"\"\"Validate the training data on predict (probabilities).\"\"\"\n if check_input:\n X = self._validate_data(X, dtype=DTYPE, accept_sparse=\"csr\",\n reset=False)\n if issparse(X) and (X.indices.dtype != np.intc or\n X.indptr.dtype != np.intc):\n raise ValueError(\"No support for np.int64 index based \"\n \"sparse matrices\")\n else:\n # The number of features is checked regardless of `check_input`\n self._check_n_features(X, reset=False)\n return X\n\n def predict(self, X, check_input=True):\n \"\"\"Predict class or regression value for X.\n\n For a classification model, the predicted class for each sample in X is\n returned. For a regression model, the predicted value based on X is\n returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. 
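As a hedged aside on the parameter handling in ``fit`` above: float values of ``min_samples_leaf`` and ``min_samples_split`` are resolved to integer counts with ``ceil``, and the split threshold is additionally floored at 2 and at twice the leaf minimum. A minimal sketch, not part of the original file; the numbers are illustrative only:

from math import ceil

n_samples = 150                                              # e.g. the iris training set
min_samples_leaf = int(ceil(0.05 * n_samples))               # float 0.05 -> 8 samples
min_samples_split = max(2, int(ceil(0.01 * n_samples)))      # float 0.01 -> 2 samples
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)  # -> 16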
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The predicted classes, or the predict values.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_X_predict(X, check_input)\n proba = self.tree_.predict(X)\n n_samples = X.shape[0]\n\n # Classification\n if is_classifier(self):\n if self.n_outputs_ == 1:\n return self.classes_.take(np.argmax(proba, axis=1), axis=0)\n\n else:\n class_type = self.classes_[0].dtype\n predictions = np.zeros((n_samples, self.n_outputs_),\n dtype=class_type)\n for k in range(self.n_outputs_):\n predictions[:, k] = self.classes_[k].take(\n np.argmax(proba[:, k], axis=1),\n axis=0)\n\n return predictions\n\n # Regression\n else:\n if self.n_outputs_ == 1:\n return proba[:, 0]\n\n else:\n return proba[:, :, 0]\n\n def apply(self, X, check_input=True):\n \"\"\"Return the index of the leaf that each sample is predicted as.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n X_leaves : array-like of shape (n_samples,)\n For each datapoint x in X, return the index of the leaf x\n ends up in. Leaves are numbered within\n ``[0; self.tree_.node_count)``, possibly with gaps in the\n numbering.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_X_predict(X, check_input)\n return self.tree_.apply(X)\n\n def decision_path(self, X, check_input=True):\n \"\"\"Return the decision path in the tree.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. 
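A small usage sketch for the prediction helpers above (illustrative, not part of the original source); it assumes scikit-learn and its bundled iris dataset:

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
# An integer max_leaf_nodes selects the best-first builder capped at that size.
clf = DecisionTreeClassifier(max_leaf_nodes=4, random_state=0).fit(X, y)
print(clf.get_n_leaves())   # at most 4
print(clf.predict(X[:3]))   # class labels, i.e. argmax over leaf class frequencies
print(clf.apply(X[:3]))     # index of the leaf each sample ends up in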
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n indicator : sparse matrix of shape (n_samples, n_nodes)\n Return a node indicator CSR matrix where non zero elements\n indicates that the samples goes through the nodes.\n \"\"\"\n X = self._validate_X_predict(X, check_input)\n return self.tree_.decision_path(X)\n\n def _prune_tree(self):\n \"\"\"Prune tree using Minimal Cost-Complexity Pruning.\"\"\"\n check_is_fitted(self)\n\n if self.ccp_alpha < 0.0:\n raise ValueError(\"ccp_alpha must be greater than or equal to 0\")\n\n if self.ccp_alpha == 0.0:\n return\n\n # build pruned tree\n if is_classifier(self):\n n_classes = np.atleast_1d(self.n_classes_)\n pruned_tree = Tree(self.n_features_, n_classes, self.n_outputs_)\n else:\n pruned_tree = Tree(self.n_features_,\n # TODO: the tree shouldn't need this param\n np.array([1] * self.n_outputs_, dtype=np.intp),\n self.n_outputs_)\n _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha)\n\n self.tree_ = pruned_tree\n\n def cost_complexity_pruning_path(self, X, y, sample_weight=None):\n \"\"\"Compute the pruning path during Minimal Cost-Complexity Pruning.\n\n See :ref:`minimal_cost_complexity_pruning` for details on the pruning\n process.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The target values (class labels) as integers or strings.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. Splits are also\n ignored if they would result in any single class carrying a\n negative weight in either child node.\n\n Returns\n -------\n ccp_path : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n ccp_alphas : ndarray\n Effective alphas of subtree during pruning.\n\n impurities : ndarray\n Sum of the impurities of the subtree leaves for the\n corresponding alpha value in ``ccp_alphas``.\n \"\"\"\n est = clone(self).set_params(ccp_alpha=0.0)\n est.fit(X, y, sample_weight=sample_weight)\n return Bunch(**ccp_pruning_path(est.tree_))\n\n @property\n def feature_importances_(self):\n \"\"\"Return the feature importances.\n\n The importance of a feature is computed as the (normalized) total\n reduction of the criterion brought by that feature.\n It is also known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). 
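An illustrative sketch of the pruning-path helper defined above (not part of the original file), again assuming the iris data:

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(X, y)
print(path.ccp_alphas)    # effective alphas, in increasing order
print(path.impurities)    # total leaf impurity of the subtree at each alpha
# Refitting with a larger ccp_alpha yields a smaller, pruned tree:
pruned = DecisionTreeClassifier(ccp_alpha=path.ccp_alphas[1],
                                random_state=0).fit(X, y)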
See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n Returns\n -------\n feature_importances_ : ndarray of shape (n_features,)\n Normalized total reduction of criteria by feature\n (Gini importance).\n \"\"\"\n check_is_fitted(self)\n\n return self.tree_.compute_feature_importances()\n\n\n# =============================================================================\n# Public estimators\n# =============================================================================\n\nclass DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree):\n \"\"\"A decision tree classifier.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : {\"gini\", \"entropy\"}, default=\"gini\"\n The function to measure the quality of a split. Supported criteria are\n \"gini\" for the Gini impurity and \"entropy\" for the information gain.\n\n splitter : {\"best\", \"random\"}, default=\"best\"\n The strategy used to choose the split at each node. Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_depth : int, default=None\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n\n max_features : int, float or {\"auto\", \"sqrt\", \"log2\"}, default=None\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=sqrt(n_features)`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n random_state : int, RandomState instance or None, default=None\n Controls the randomness of the estimator. The features are always\n randomly permuted at each split, even if ``splitter`` is set to\n ``\"best\"``. 
When ``max_features < n_features``, the algorithm will\n select ``max_features`` at random at each split before finding the best\n split among them. But the best found split may vary across different\n runs, even if ``max_features=n_features``. That is the case, if the\n improvement of the criterion is identical for several splits and one\n split has to be selected at random. To obtain a deterministic behaviour\n during fitting, ``random_state`` has to be fixed to an integer.\n See :term:`Glossary <random_state>` for details.\n\n max_leaf_nodes : int, default=None\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n min_impurity_split : float, default=0\n Threshold for early stopping in tree growth. A node will split\n if its impurity is above the threshold, otherwise it is a leaf.\n\n .. deprecated:: 0.19\n ``min_impurity_split`` has been deprecated in favor of\n ``min_impurity_decrease`` in 0.19. The default value of\n ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\n will be removed in 1.0 (renaming of 0.25).\n Use ``min_impurity_decrease`` instead.\n\n class_weight : dict, list of dict or \"balanced\", default=None\n Weights associated with classes in the form ``{class_label: weight}``.\n If None, all classes are supposed to have weight one. For\n multi-output problems, a list of dicts can be provided in the same\n order as the columns of y.\n\n Note that for multioutput (including multilabel) weights should be\n defined for each class of every column in its own dict. For example,\n for four-class multilabel classification weights should be\n [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n [{1:1}, {2:5}, {3:1}, {4:1}].\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n For multi-output, the weights of each column of y will be multiplied.\n\n Note that these weights will be multiplied with sample_weight (passed\n through the fit method) if sample_weight is specified.\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n :ref:`minimal_cost_complexity_pruning` for details.\n\n .. 
versionadded:: 0.22\n\n Attributes\n ----------\n classes_ : ndarray of shape (n_classes,) or list of ndarray\n The classes labels (single output problem),\n or a list of arrays of class labels (multi-output problem).\n\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance [4]_.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n max_features_ : int\n The inferred value of max_features.\n\n n_classes_ : int or list of int\n The number of classes (for single output problems),\n or a list containing the number of classes for each\n output (for multi-output problems).\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree instance\n The underlying Tree object. Please refer to\n ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and\n :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`\n for basic usage of these attributes.\n\n See Also\n --------\n DecisionTreeRegressor : A decision tree regressor.\n\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n\n The :meth:`predict` method operates using the :func:`numpy.argmax`\n function on the outputs of :meth:`predict_proba`. This means that in\n case the highest predicted probabilities are tied, the classifier will\n predict the tied class with the lowest index in :term:`classes_`.\n\n References\n ----------\n\n .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning\n\n .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, \"Classification\n and Regression Trees\", Wadsworth, Belmont, CA, 1984.\n\n .. [3] T. Hastie, R. Tibshirani and J. Friedman. \"Elements of Statistical\n Learning\", Springer, 2009.\n\n .. [4] L. Breiman, and A. Cutler, \"Random Forests\",\n https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm\n\n Examples\n --------\n >>> from sklearn.datasets import load_iris\n >>> from sklearn.model_selection import cross_val_score\n >>> from sklearn.tree import DecisionTreeClassifier\n >>> clf = DecisionTreeClassifier(random_state=0)\n >>> iris = load_iris()\n >>> cross_val_score(clf, iris.data, iris.target, cv=10)\n ... # doctest: +SKIP\n ...\n array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,\n 0.93..., 0.93..., 1. , 0.93..., 1. 
])\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, *,\n criterion=\"gini\",\n splitter=\"best\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=None,\n random_state=None,\n max_leaf_nodes=None,\n min_impurity_decrease=0.,\n min_impurity_split=None,\n class_weight=None,\n ccp_alpha=0.0):\n super().__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n class_weight=class_weight,\n random_state=random_state,\n min_impurity_decrease=min_impurity_decrease,\n min_impurity_split=min_impurity_split,\n ccp_alpha=ccp_alpha)\n\n def fit(self, X, y, sample_weight=None, check_input=True,\n X_idx_sorted=\"deprecated\"):\n \"\"\"Build a decision tree classifier from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The target values (class labels) as integers or strings.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node. Splits are also\n ignored if they would result in any single class carrying a\n negative weight in either child node.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n X_idx_sorted : deprecated, default=\"deprecated\"\n This parameter is deprecated and has no effect.\n It will be removed in 1.1 (renaming of 0.26).\n\n .. deprecated :: 0.24\n\n Returns\n -------\n self : DecisionTreeClassifier\n Fitted estimator.\n \"\"\"\n\n super().fit(\n X, y,\n sample_weight=sample_weight,\n check_input=check_input,\n X_idx_sorted=X_idx_sorted)\n return self\n\n def predict_proba(self, X, check_input=True):\n \"\"\"Predict class probabilities of the input samples X.\n\n The predicted class probability is the fraction of samples of the same\n class in a leaf.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Returns\n -------\n proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \\\n such arrays if n_outputs > 1\n The class probabilities of the input samples. 
The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_X_predict(X, check_input)\n proba = self.tree_.predict(X)\n\n if self.n_outputs_ == 1:\n proba = proba[:, :self.n_classes_]\n normalizer = proba.sum(axis=1)[:, np.newaxis]\n normalizer[normalizer == 0.0] = 1.0\n proba /= normalizer\n\n return proba\n\n else:\n all_proba = []\n\n for k in range(self.n_outputs_):\n proba_k = proba[:, k, :self.n_classes_[k]]\n normalizer = proba_k.sum(axis=1)[:, np.newaxis]\n normalizer[normalizer == 0.0] = 1.0\n proba_k /= normalizer\n all_proba.append(proba_k)\n\n return all_proba\n\n def predict_log_proba(self, X):\n \"\"\"Predict class log-probabilities of the input samples X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \\\n such arrays if n_outputs > 1\n The class log-probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n \"\"\"\n proba = self.predict_proba(X)\n\n if self.n_outputs_ == 1:\n return np.log(proba)\n\n else:\n for k in range(self.n_outputs_):\n proba[k] = np.log(proba[k])\n\n return proba\n\n\nclass DecisionTreeRegressor(RegressorMixin, BaseDecisionTree):\n \"\"\"A decision tree regressor.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : {\"mse\", \"friedman_mse\", \"mae\", \"poisson\"}, default=\"mse\"\n The function to measure the quality of a split. Supported criteria\n are \"mse\" for the mean squared error, which is equal to variance\n reduction as feature selection criterion and minimizes the L2 loss\n using the mean of each terminal node, \"friedman_mse\", which uses mean\n squared error with Friedman's improvement score for potential splits,\n \"mae\" for the mean absolute error, which minimizes the L1 loss using\n the median of each terminal node, and \"poisson\" which uses reduction in\n Poisson deviance to find splits.\n\n .. versionadded:: 0.18\n Mean Absolute Error (MAE) criterion.\n\n .. versionadded:: 0.24\n Poisson deviance criterion.\n\n splitter : {\"best\", \"random\"}, default=\"best\"\n The strategy used to choose the split at each node. Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_depth : int, default=None\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. 
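A brief usage sketch for ``predict_proba`` and ``predict_log_proba`` defined above (illustrative only, not part of the original source):

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
proba = clf.predict_proba(X[:2])            # shape (2, 3): per-class leaf frequencies
print(proba.sum(axis=1))                    # each row sums to 1
print(clf.predict_log_proba(X[:2]).shape)   # same shape, natural log of proba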
This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided.\n\n max_features : int, float or {\"auto\", \"sqrt\", \"log2\"}, default=None\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n random_state : int, RandomState instance or None, default=None\n Controls the randomness of the estimator. The features are always\n randomly permuted at each split, even if ``splitter`` is set to\n ``\"best\"``. When ``max_features < n_features``, the algorithm will\n select ``max_features`` at random at each split before finding the best\n split among them. But the best found split may vary across different\n runs, even if ``max_features=n_features``. That is the case, if the\n improvement of the criterion is identical for several splits and one\n split has to be selected at random. To obtain a deterministic behaviour\n during fitting, ``random_state`` has to be fixed to an integer.\n See :term:`Glossary <random_state>` for details.\n\n max_leaf_nodes : int, default=None\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n min_impurity_split : float, default=0\n Threshold for early stopping in tree growth. A node will split\n if its impurity is above the threshold, otherwise it is a leaf.\n\n .. deprecated:: 0.19\n ``min_impurity_split`` has been deprecated in favor of\n ``min_impurity_decrease`` in 0.19. The default value of\n ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\n will be removed in 1.0 (renaming of 0.25).\n Use ``min_impurity_decrease`` instead.\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. 
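To make the weighted impurity decrease formula quoted above concrete, here is a small worked example with made-up numbers (not taken from the original documentation):

# Hypothetical split: 100 samples total, 40 reach the node,
# 30 go left and 10 go right after the candidate split.
N, N_t, N_t_L, N_t_R = 100, 40, 30, 10
impurity, left_impurity, right_impurity = 0.5, 0.4, 0.2
decrease = N_t / N * (impurity
                      - N_t_R / N_t * right_impurity
                      - N_t_L / N_t * left_impurity)
print(decrease)   # 0.4 * (0.5 - 0.05 - 0.3) -> 0.06 (up to float rounding)
# The split is kept only if decrease >= min_impurity_decrease.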
The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n feature_importances_ : ndarray of shape (n_features,)\n The feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the\n (normalized) total reduction of the criterion brought\n by that feature. It is also known as the Gini importance [4]_.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n max_features_ : int\n The inferred value of max_features.\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree instance\n The underlying Tree object. Please refer to\n ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and\n :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`\n for basic usage of these attributes.\n\n See Also\n --------\n DecisionTreeClassifier : A decision tree classifier.\n\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n\n References\n ----------\n\n .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning\n\n .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, \"Classification\n and Regression Trees\", Wadsworth, Belmont, CA, 1984.\n\n .. [3] T. Hastie, R. Tibshirani and J. Friedman. \"Elements of Statistical\n Learning\", Springer, 2009.\n\n .. [4] L. Breiman, and A. Cutler, \"Random Forests\",\n https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm\n\n Examples\n --------\n >>> from sklearn.datasets import load_diabetes\n >>> from sklearn.model_selection import cross_val_score\n >>> from sklearn.tree import DecisionTreeRegressor\n >>> X, y = load_diabetes(return_X_y=True)\n >>> regressor = DecisionTreeRegressor(random_state=0)\n >>> cross_val_score(regressor, X, y, cv=10)\n ... 
# doctest: +SKIP\n ...\n array([-0.39..., -0.46..., 0.02..., 0.06..., -0.50...,\n 0.16..., 0.11..., -0.73..., -0.30..., -0.00...])\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, *,\n criterion=\"mse\",\n splitter=\"best\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=None,\n random_state=None,\n max_leaf_nodes=None,\n min_impurity_decrease=0.,\n min_impurity_split=None,\n ccp_alpha=0.0):\n super().__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n random_state=random_state,\n min_impurity_decrease=min_impurity_decrease,\n min_impurity_split=min_impurity_split,\n ccp_alpha=ccp_alpha)\n\n def fit(self, X, y, sample_weight=None, check_input=True,\n X_idx_sorted=\"deprecated\"):\n \"\"\"Build a decision tree regressor from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csc_matrix``.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n The target values (real numbers). Use ``dtype=np.float64`` and\n ``order='C'`` for maximum efficiency.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted. Splits\n that would create child nodes with net zero or negative weight are\n ignored while searching for a split in each node.\n\n check_input : bool, default=True\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n X_idx_sorted : deprecated, default=\"deprecated\"\n This parameter is deprecated and has no effect.\n It will be removed in 1.1 (renaming of 0.26).\n\n .. deprecated :: 0.24\n\n Returns\n -------\n self : DecisionTreeRegressor\n Fitted estimator.\n \"\"\"\n\n super().fit(\n X, y,\n sample_weight=sample_weight,\n check_input=check_input,\n X_idx_sorted=X_idx_sorted)\n return self\n\n def _compute_partial_dependence_recursion(self, grid, target_features):\n \"\"\"Fast partial dependence computation.\n\n Parameters\n ----------\n grid : ndarray of shape (n_samples, n_target_features)\n The grid points on which the partial dependence should be\n evaluated.\n target_features : ndarray of shape (n_target_features)\n The set of target features for which the partial dependence\n should be evaluated.\n\n Returns\n -------\n averaged_predictions : ndarray of shape (n_samples,)\n The value of the partial dependence function on each grid point.\n \"\"\"\n grid = np.asarray(grid, dtype=DTYPE, order='C')\n averaged_predictions = np.zeros(shape=grid.shape[0],\n dtype=np.float64, order='C')\n\n self.tree_.compute_partial_dependence(\n grid, target_features, averaged_predictions)\n return averaged_predictions\n\n\nclass ExtraTreeClassifier(DecisionTreeClassifier):\n \"\"\"An extremely randomized tree classifier.\n\n Extra-trees differ from classic decision trees in the way they are built.\n When looking for the best split to separate the samples of a node into two\n groups, random splits are drawn for each of the `max_features` randomly\n selected features and the best split among those is chosen. 
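As a hedged illustration of the partial-dependence hook implemented on DecisionTreeRegressor above: the public entry point is ``sklearn.inspection.partial_dependence``, which can route through the tree's fast 'recursion' method. A sketch only, assuming the bundled diabetes dataset:

from sklearn.datasets import load_diabetes
from sklearn.inspection import partial_dependence
from sklearn.tree import DecisionTreeRegressor

X, y = load_diabetes(return_X_y=True)
reg = DecisionTreeRegressor(max_depth=3, random_state=0).fit(X, y)
# method='recursion' uses the tree's compiled traversal instead of
# brute-force predictions over the grid.
result = partial_dependence(reg, X, features=[2], method='recursion')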
When\n `max_features` is set 1, this amounts to building a totally random\n decision tree.\n\n Warning: Extra-trees should only be used within ensemble methods.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : {\"gini\", \"entropy\"}, default=\"gini\"\n The function to measure the quality of a split. Supported criteria are\n \"gini\" for the Gini impurity and \"entropy\" for the information gain.\n\n splitter : {\"random\", \"best\"}, default=\"random\"\n The strategy used to choose the split at each node. Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_depth : int, default=None\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. 
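A minimal sketch of the 'totally random tree' case described above (illustrative, not part of the original file):

from sklearn.datasets import load_iris
from sklearn.tree import ExtraTreeClassifier

X, y = load_iris(return_X_y=True)
# splitter='random' is the default; with max_features=1 both the candidate
# feature and its threshold are drawn at random at every node.
tree = ExtraTreeClassifier(max_features=1, random_state=0).fit(X, y)
print(tree.get_depth(), tree.get_n_leaves())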
Samples have\n equal weight when sample_weight is not provided.\n\n max_features : int, float, {\"auto\", \"sqrt\", \"log2\"} or None, default=\"auto\"\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=sqrt(n_features)`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n random_state : int, RandomState instance or None, default=None\n Used to pick randomly the `max_features` used at each split.\n See :term:`Glossary <random_state>` for details.\n\n max_leaf_nodes : int, default=None\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n min_impurity_split : float, default=None\n Threshold for early stopping in tree growth. A node will split\n if its impurity is above the threshold, otherwise it is a leaf.\n\n .. deprecated:: 0.19\n ``min_impurity_split`` has been deprecated in favor of\n ``min_impurity_decrease`` in 0.19. The default value of\n ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\n will be removed in 1.0 (renaming of 0.25).\n Use ``min_impurity_decrease`` instead.\n\n class_weight : dict, list of dict or \"balanced\", default=None\n Weights associated with classes in the form ``{class_label: weight}``.\n If None, all classes are supposed to have weight one. For\n multi-output problems, a list of dicts can be provided in the same\n order as the columns of y.\n\n Note that for multioutput (including multilabel) weights should be\n defined for each class of every column in its own dict. For example,\n for four-class multilabel classification weights should be\n [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n [{1:1}, {2:5}, {3:1}, {4:1}].\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n For multi-output, the weights of each column of y will be multiplied.\n\n Note that these weights will be multiplied with sample_weight (passed\n through the fit method) if sample_weight is specified.\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. 
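The 'balanced' class-weight formula quoted above can be checked directly with ``sklearn.utils.class_weight.compute_sample_weight``; a small sketch with made-up labels:

import numpy as np
from sklearn.utils.class_weight import compute_sample_weight

y_toy = np.array([0, 0, 0, 1])                 # imbalanced toy labels
print(compute_sample_weight('balanced', y_toy))
# n_samples / (n_classes * np.bincount(y)) -> approx. [0.667, 0.667, 0.667, 2.0]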
The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n classes_ : ndarray of shape (n_classes,) or list of ndarray\n The classes labels (single output problem),\n or a list of arrays of class labels (multi-output problem).\n\n max_features_ : int\n The inferred value of max_features.\n\n n_classes_ : int or list of int\n The number of classes (for single output problems),\n or a list containing the number of classes for each\n output (for multi-output problems).\n\n feature_importances_ : ndarray of shape (n_features,)\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance.\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree instance\n The underlying Tree object. Please refer to\n ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and\n :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`\n for basic usage of these attributes.\n\n See Also\n --------\n ExtraTreeRegressor : An extremely randomized tree regressor.\n sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.\n sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.\n\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n\n References\n ----------\n\n .. [1] P. Geurts, D. Ernst., and L. Wehenkel, \"Extremely randomized trees\",\n Machine Learning, 63(1), 3-42, 2006.\n\n Examples\n --------\n >>> from sklearn.datasets import load_iris\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.ensemble import BaggingClassifier\n >>> from sklearn.tree import ExtraTreeClassifier\n >>> X, y = load_iris(return_X_y=True)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> extra_tree = ExtraTreeClassifier(random_state=0)\n >>> cls = BaggingClassifier(extra_tree, random_state=0).fit(\n ... 
X_train, y_train)\n >>> cls.score(X_test, y_test)\n 0.8947...\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, *,\n criterion=\"gini\",\n splitter=\"random\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n random_state=None,\n max_leaf_nodes=None,\n min_impurity_decrease=0.,\n min_impurity_split=None,\n class_weight=None,\n ccp_alpha=0.0):\n super().__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n class_weight=class_weight,\n min_impurity_decrease=min_impurity_decrease,\n min_impurity_split=min_impurity_split,\n random_state=random_state,\n ccp_alpha=ccp_alpha)\n\n\nclass ExtraTreeRegressor(DecisionTreeRegressor):\n \"\"\"An extremely randomized tree regressor.\n\n Extra-trees differ from classic decision trees in the way they are built.\n When looking for the best split to separate the samples of a node into two\n groups, random splits are drawn for each of the `max_features` randomly\n selected features and the best split among those is chosen. When\n `max_features` is set 1, this amounts to building a totally random\n decision tree.\n\n Warning: Extra-trees should only be used within ensemble methods.\n\n Read more in the :ref:`User Guide <tree>`.\n\n Parameters\n ----------\n criterion : {\"mse\", \"friedman_mse\", \"mae\"}, default=\"mse\"\n The function to measure the quality of a split. Supported criteria\n are \"mse\" for the mean squared error, which is equal to variance\n reduction as feature selection criterion and \"mae\" for the mean\n absolute error.\n\n .. versionadded:: 0.18\n Mean Absolute Error (MAE) criterion.\n\n .. versionadded:: 0.24\n Poisson deviance criterion.\n\n splitter : {\"random\", \"best\"}, default=\"random\"\n The strategy used to choose the split at each node. Supported\n strategies are \"best\" to choose the best split and \"random\" to choose\n the best random split.\n\n max_depth : int, default=None\n The maximum depth of the tree. If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int or float, default=2\n The minimum number of samples required to split an internal node:\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_samples_leaf : int or float, default=1\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` training samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n .. versionchanged:: 0.18\n Added float values for fractions.\n\n min_weight_fraction_leaf : float, default=0.0\n The minimum weighted fraction of the sum total of weights (of all\n the input samples) required to be at a leaf node. 
Samples have\n equal weight when sample_weight is not provided.\n\n max_features : int, float, {\"auto\", \"sqrt\", \"log2\"} or None, default=\"auto\"\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n random_state : int, RandomState instance or None, default=None\n Used to pick randomly the `max_features` used at each split.\n See :term:`Glossary <random_state>` for details.\n\n min_impurity_decrease : float, default=0.0\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of samples, ``N_t`` is the number of\n samples at the current node, ``N_t_L`` is the number of samples in the\n left child, and ``N_t_R`` is the number of samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n .. versionadded:: 0.19\n\n min_impurity_split : float, default=None\n Threshold for early stopping in tree growth. A node will split\n if its impurity is above the threshold, otherwise it is a leaf.\n\n .. deprecated:: 0.19\n ``min_impurity_split`` has been deprecated in favor of\n ``min_impurity_decrease`` in 0.19. The default value of\n ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\n will be removed in 1.0 (renaming of 0.25).\n Use ``min_impurity_decrease`` instead.\n\n max_leaf_nodes : int, default=None\n Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n ccp_alpha : non-negative float, default=0.0\n Complexity parameter used for Minimal Cost-Complexity Pruning. The\n subtree with the largest cost complexity that is smaller than\n ``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n :ref:`minimal_cost_complexity_pruning` for details.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n max_features_ : int\n The inferred value of max_features.\n\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n feature_importances_ : ndarray of shape (n_features,)\n Return impurity-based feature importances (the higher, the more\n important the feature).\n\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n\n n_outputs_ : int\n The number of outputs when ``fit`` is performed.\n\n tree_ : Tree instance\n The underlying Tree object. 
Please refer to\n ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and\n :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`\n for basic usage of these attributes.\n\n See Also\n --------\n ExtraTreeClassifier : An extremely randomized tree classifier.\n sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.\n sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.\n\n Notes\n -----\n The default values for the parameters controlling the size of the trees\n (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and\n unpruned trees which can potentially be very large on some data sets. To\n reduce memory consumption, the complexity and size of the trees should be\n controlled by setting those parameter values.\n\n References\n ----------\n\n .. [1] P. Geurts, D. Ernst., and L. Wehenkel, \"Extremely randomized trees\",\n Machine Learning, 63(1), 3-42, 2006.\n\n Examples\n --------\n >>> from sklearn.datasets import load_diabetes\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.ensemble import BaggingRegressor\n >>> from sklearn.tree import ExtraTreeRegressor\n >>> X, y = load_diabetes(return_X_y=True)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> extra_tree = ExtraTreeRegressor(random_state=0)\n >>> reg = BaggingRegressor(extra_tree, random_state=0).fit(\n ... X_train, y_train)\n >>> reg.score(X_test, y_test)\n 0.33...\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, *,\n criterion=\"mse\",\n splitter=\"random\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n random_state=None,\n min_impurity_decrease=0.,\n min_impurity_split=None,\n max_leaf_nodes=None,\n ccp_alpha=0.0):\n super().__init__(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n min_impurity_decrease=min_impurity_decrease,\n min_impurity_split=min_impurity_split,\n random_state=random_state,\n ccp_alpha=ccp_alpha)\n", "# -*- coding: utf-8 -*-\n\n\"\"\"\nLeaflet GeoJson and miscellaneous features.\n\n\"\"\"\n\nimport json\nimport warnings\nimport functools\nimport operator\n\nfrom branca.colormap import LinearColormap, StepColormap\nfrom branca.element import (Element, Figure, JavascriptLink, MacroElement)\nfrom branca.utilities import color_brewer\n\nfrom folium.elements import JSCSSMixin\nfrom folium.folium import Map\nfrom folium.map import (FeatureGroup, Icon, Layer, Marker, Tooltip)\nfrom folium.utilities import (\n validate_locations,\n _parse_size,\n get_bounds,\n image_to_url,\n none_max,\n none_min,\n get_obj_in_upper_tree,\n parse_options,\n camelize\n)\nfrom folium.vector_layers import Circle, CircleMarker, PolyLine, path_options\n\nfrom jinja2 import Template\n\nimport numpy as np\n\nimport requests\n\n\nclass RegularPolygonMarker(JSCSSMixin, Marker):\n \"\"\"\n Custom markers using the Leaflet Data Vis Framework.\n\n Parameters\n ----------\n location: tuple or list\n Latitude and Longitude of Marker (Northing, Easting)\n number_of_sides: int, default 4\n Number of polygon sides\n rotation: int, default 0\n Rotation angle in degrees\n radius: int, default 15\n Marker radius, in pixels\n popup: string or Popup, optional\n Input text or visualization for object displayed when clicking.\n 
tooltip: str or folium.Tooltip, optional\n Display a text when hovering over the object.\n **kwargs:\n See vector layers path_options for additional arguments.\n\n https://humangeo.github.io/leaflet-dvf/\n\n \"\"\"\n _template = Template(u\"\"\"\n {% macro script(this, kwargs) %}\n var {{ this.get_name() }} = new L.RegularPolygonMarker(\n {{ this.location|tojson }},\n {{ this.options|tojson }}\n ).addTo({{ this._parent.get_name() }});\n {% endmacro %}\n \"\"\")\n\n default_js = [\n ('dvf_js',\n 'https://cdnjs.cloudflare.com/ajax/libs/leaflet-dvf/0.3.0/leaflet-dvf.markers.min.js'),\n ]\n\n def __init__(self, location, number_of_sides=4, rotation=0, radius=15,\n popup=None, tooltip=None, **kwargs):\n super(RegularPolygonMarker, self).__init__(\n location,\n popup=popup, tooltip=tooltip\n )\n self._name = 'RegularPolygonMarker'\n self.options = path_options(**kwargs)\n self.options.update(parse_options(\n number_of_sides=number_of_sides,\n rotation=rotation,\n radius=radius,\n ))\n\n\nclass Vega(JSCSSMixin, Element):\n \"\"\"\n Creates a Vega chart element.\n\n Parameters\n ----------\n data: JSON-like str or object\n The Vega description of the chart.\n It can also be any object that has a method `to_json`,\n so that you can (for instance) provide a `vincent` chart.\n width: int or str, default None\n The width of the output element.\n If None, either data['width'] (if available) or '100%' will be used.\n Ex: 120, '120px', '80%'\n height: int or str, default None\n The height of the output element.\n If None, either data['width'] (if available) or '100%' will be used.\n Ex: 120, '120px', '80%'\n left: int or str, default '0%'\n The horizontal distance of the output with respect to the parent\n HTML object. Ex: 120, '120px', '80%'\n top: int or str, default '0%'\n The vertical distance of the output with respect to the parent\n HTML object. 
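A hedged usage sketch for ``RegularPolygonMarker`` above (illustrative; the coordinates, styling, and output filename are arbitrary):

import folium
from folium.features import RegularPolygonMarker

m = folium.Map(location=[45.5236, -122.6750], zoom_start=13)
RegularPolygonMarker(
    location=[45.5236, -122.6750],
    number_of_sides=6,        # hexagon
    radius=20,
    rotation=30,
    popup='Hexagon marker',
    fill_color='green',       # forwarded to path_options via **kwargs
).add_to(m)
m.save('regular_polygon.html')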
Ex: 120, '120px', '80%'\n position: str, default 'relative'\n The `position` argument that the CSS shall contain.\n Ex: 'relative', 'absolute'\n\n \"\"\"\n _template = Template(u'')\n\n default_js = [\n ('d3',\n 'https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js'),\n ('vega',\n 'https://cdnjs.cloudflare.com/ajax/libs/vega/1.4.3/vega.min.js'),\n ('jquery',\n 'https://code.jquery.com/jquery-2.1.0.min.js'),\n ]\n\n def __init__(self, data, width=None, height=None,\n left='0%', top='0%', position='relative'):\n super(Vega, self).__init__()\n self._name = 'Vega'\n self.data = data.to_json() if hasattr(data, 'to_json') else data\n if isinstance(self.data, str):\n self.data = json.loads(self.data)\n\n # Size Parameters.\n self.width = _parse_size(self.data.get('width', '100%') if\n width is None else width)\n self.height = _parse_size(self.data.get('height', '100%') if\n height is None else height)\n self.left = _parse_size(left)\n self.top = _parse_size(top)\n self.position = position\n\n def render(self, **kwargs):\n \"\"\"Renders the HTML representation of the element.\"\"\"\n super().render(**kwargs)\n\n self.json = json.dumps(self.data)\n\n self._parent.html.add_child(Element(Template(\"\"\"\n <div id=\"{{this.get_name()}}\"></div>\n \"\"\").render(this=self, kwargs=kwargs)), name=self.get_name())\n\n self._parent.script.add_child(Element(Template(\"\"\"\n vega_parse({{this.json}},{{this.get_name()}});\n \"\"\").render(this=self)), name=self.get_name())\n\n figure = self.get_root()\n assert isinstance(figure, Figure), ('You cannot render this Element '\n 'if it is not in a Figure.')\n\n figure.header.add_child(Element(Template(\"\"\"\n <style> #{{this.get_name()}} {\n position : {{this.position}};\n width : {{this.width[0]}}{{this.width[1]}};\n height: {{this.height[0]}}{{this.height[1]}};\n left: {{this.left[0]}}{{this.left[1]}};\n top: {{this.top[0]}}{{this.top[1]}};\n </style>\n \"\"\").render(this=self, **kwargs)), name=self.get_name())\n\n figure.script.add_child(\n Template(\"\"\"function vega_parse(spec, div) {\n vg.parse.spec(spec, function(chart) { chart({el:div}).update(); });}\"\"\"), # noqa\n name='vega_parse')\n\n\nclass VegaLite(Element):\n \"\"\"\n Creates a Vega-Lite chart element.\n\n Parameters\n ----------\n data: JSON-like str or object\n The Vega-Lite description of the chart.\n It can also be any object that has a method `to_json`,\n so that you can (for instance) provide an `Altair` chart.\n width: int or str, default None\n The width of the output element.\n If None, either data['width'] (if available) or '100%' will be used.\n Ex: 120, '120px', '80%'\n height: int or str, default None\n The height of the output element.\n If None, either data['width'] (if available) or '100%' will be used.\n Ex: 120, '120px', '80%'\n left: int or str, default '0%'\n The horizontal distance of the output with respect to the parent\n HTML object. Ex: 120, '120px', '80%'\n top: int or str, default '0%'\n The vertical distance of the output with respect to the parent\n HTML object. 
Ex: 120, '120px', '80%'\n position: str, default 'relative'\n The `position` argument that the CSS shall contain.\n Ex: 'relative', 'absolute'\n\n \"\"\"\n _template = Template(u'')\n\n def __init__(self, data, width=None, height=None,\n left='0%', top='0%', position='relative'):\n super(self.__class__, self).__init__()\n self._name = 'VegaLite'\n self.data = data.to_json() if hasattr(data, 'to_json') else data\n if isinstance(self.data, str):\n self.data = json.loads(self.data)\n\n self.json = json.dumps(self.data)\n\n # Size Parameters.\n self.width = _parse_size(self.data.get('width', '100%') if\n width is None else width)\n self.height = _parse_size(self.data.get('height', '100%') if\n height is None else height)\n self.left = _parse_size(left)\n self.top = _parse_size(top)\n self.position = position\n\n def render(self, **kwargs):\n \"\"\"Renders the HTML representation of the element.\"\"\"\n vegalite_major_version = self._get_vegalite_major_versions(self.data)\n\n self._parent.html.add_child(Element(Template(\"\"\"\n <div id=\"{{this.get_name()}}\"></div>\n \"\"\").render(this=self, kwargs=kwargs)), name=self.get_name())\n\n figure = self.get_root()\n assert isinstance(figure, Figure), ('You cannot render this Element '\n 'if it is not in a Figure.')\n\n figure.header.add_child(Element(Template(\"\"\"\n <style> #{{this.get_name()}} {\n position : {{this.position}};\n width : {{this.width[0]}}{{this.width[1]}};\n height: {{this.height[0]}}{{this.height[1]}};\n left: {{this.left[0]}}{{this.left[1]}};\n top: {{this.top[0]}}{{this.top[1]}};\n </style>\n \"\"\").render(this=self, **kwargs)), name=self.get_name())\n\n if vegalite_major_version == '1':\n self._embed_vegalite_v1(figure)\n elif vegalite_major_version == '2':\n self._embed_vegalite_v2(figure)\n elif vegalite_major_version == '3':\n self._embed_vegalite_v3(figure)\n else:\n # Version 2 is assumed as the default, if no version is given in the schema.\n self._embed_vegalite_v2(figure)\n\n def _get_vegalite_major_versions(self, spec):\n try:\n schema = spec['$schema']\n except KeyError:\n major_version = None\n else:\n major_version = schema.split('/')[-1].split('.')[0].lstrip('v')\n\n return major_version\n\n def _embed_vegalite_v3(self, figure):\n self._vega_embed()\n\n figure.header.add_child(JavascriptLink('https://cdn.jsdelivr.net/npm/vega@4'), name='vega')\n figure.header.add_child(JavascriptLink('https://cdn.jsdelivr.net/npm/vega-lite@3'), name='vega-lite')\n figure.header.add_child(JavascriptLink('https://cdn.jsdelivr.net/npm/vega-embed@3'), name='vega-embed')\n\n def _embed_vegalite_v2(self, figure):\n self._vega_embed()\n\n figure.header.add_child(JavascriptLink('https://cdn.jsdelivr.net/npm/vega@3'), name='vega')\n figure.header.add_child(JavascriptLink('https://cdn.jsdelivr.net/npm/vega-lite@2'), name='vega-lite')\n figure.header.add_child(JavascriptLink('https://cdn.jsdelivr.net/npm/vega-embed@3'), name='vega-embed')\n\n def _vega_embed(self):\n self._parent.script.add_child(Element(Template(\"\"\"\n vegaEmbed({{this.get_name()}}, {{this.json}})\n .then(function(result) {})\n .catch(console.error);\n \"\"\").render(this=self)), name=self.get_name())\n\n def _embed_vegalite_v1(self, figure):\n self._parent.script.add_child(Element(Template(\"\"\"\n var embedSpec = {\n mode: \"vega-lite\",\n spec: {{this.json}}\n };\n vg.embed(\n {{this.get_name()}}, embedSpec, function(error, result) {}\n );\n \"\"\").render(this=self)), name=self.get_name())\n\n figure.header.add_child(JavascriptLink('https://d3js.org/d3.v3.min.js'), 
name='d3')\n figure.header.add_child(JavascriptLink('https://cdnjs.cloudflare.com/ajax/libs/vega/2.6.5/vega.js'), name='vega') # noqa\n figure.header.add_child(JavascriptLink('https://cdnjs.cloudflare.com/ajax/libs/vega-lite/1.3.1/vega-lite.js'), name='vega-lite') # noqa\n figure.header.add_child(JavascriptLink('https://cdnjs.cloudflare.com/ajax/libs/vega-embed/2.2.0/vega-embed.js'), name='vega-embed') # noqa\n\n\nclass GeoJson(Layer):\n \"\"\"\n Creates a GeoJson object for plotting into a Map.\n\n Parameters\n ----------\n data: file, dict or str.\n The GeoJSON data you want to plot.\n * If file, then data will be read in the file and fully\n embedded in Leaflet's JavaScript.\n * If dict, then data will be converted to JSON and embedded\n in the JavaScript.\n * If str, then data will be passed to the JavaScript as-is.\n * If `__geo_interface__` is available, the `__geo_interface__`\n dictionary will be serialized to JSON and\n reprojected if `to_crs` is available.\n style_function: function, default None\n Function mapping a GeoJson Feature to a style dict.\n highlight_function: function, default None\n Function mapping a GeoJson Feature to a style dict for mouse events.\n name : string, default None\n The name of the Layer, as it will appear in LayerControls\n overlay : bool, default True\n Adds the layer as an optional overlay (True) or the base layer (False).\n control : bool, default True\n Whether the Layer will be included in LayerControls\n show: bool, default True\n Whether the layer will be shown on opening (only for overlays).\n smooth_factor: float, default None\n How much to simplify the polyline on each zoom level. More means\n better performance and smoother look, and less means more accurate\n representation. Leaflet defaults to 1.0.\n tooltip: GeoJsonTooltip, Tooltip or str, default None\n Display a text when hovering over the object. Can utilize the data,\n see folium.GeoJsonTooltip for info on how to do that.\n popup: GeoJsonPopup, optional\n Show a different popup for each feature by passing a GeoJsonPopup object.\n marker: Circle, CircleMarker or Marker, optional\n If your data contains Point geometry, you can format the markers by passing a Cirle,\n CircleMarker or Marker object with your wanted options. The `style_function` and\n `highlight_function` will also target the marker object you passed.\n embed: bool, default True\n Whether to embed the data in the html file or not. Note that disabling\n embedding is only supported if you provide a file link or URL.\n zoom_on_click: bool, default False\n Set to True to enable zooming in on a geometry when clicking on it.\n\n Examples\n --------\n >>> # Providing filename that shall be embedded.\n >>> GeoJson('foo.json')\n >>> # Providing filename that shall not be embedded.\n >>> GeoJson('foo.json', embed=False)\n >>> # Providing dict.\n >>> GeoJson(json.load(open('foo.json')))\n >>> # Providing string.\n >>> GeoJson(open('foo.json').read())\n\n >>> # Provide a style_function that color all states green but Alabama.\n >>> style_function = lambda x: {'fillColor': '#0000ff' if\n ... x['properties']['name']=='Alabama' else\n ... 
'#00ff00'}\n >>> GeoJson(geojson, style_function=style_function)\n\n \"\"\"\n _template = Template(u\"\"\"\n {% macro script(this, kwargs) %}\n {%- if this.style %}\n function {{ this.get_name() }}_styler(feature) {\n switch({{ this.feature_identifier }}) {\n {%- for style, ids_list in this.style_map.items() if not style == 'default' %}\n {% for id_val in ids_list %}case {{ id_val|tojson }}: {% endfor %}\n return {{ style }};\n {%- endfor %}\n default:\n return {{ this.style_map['default'] }};\n }\n }\n {%- endif %}\n {%- if this.highlight %}\n function {{ this.get_name() }}_highlighter(feature) {\n switch({{ this.feature_identifier }}) {\n {%- for style, ids_list in this.highlight_map.items() if not style == 'default' %}\n {% for id_val in ids_list %}case {{ id_val|tojson }}: {% endfor %}\n return {{ style }};\n {%- endfor %}\n default:\n return {{ this.highlight_map['default'] }};\n }\n }\n {%- endif %}\n\n {%- if this.marker %}\n function {{ this.get_name() }}_pointToLayer(feature, latlng) {\n var opts = {{ this.marker.options | tojson | safe }};\n {% if this.marker._name == 'Marker' and this.marker.icon %}\n const iconOptions = {{ this.marker.icon.options | tojson | safe }}\n const iconRootAlias = L{%- if this.marker.icon._name == \"Icon\" %}.AwesomeMarkers{%- endif %}\n opts.icon = new iconRootAlias.{{ this.marker.icon._name }}(iconOptions)\n {% endif %}\n {%- if this.style_function %}\n let style = {{ this.get_name()}}_styler(feature)\n Object.assign({%- if this.marker.icon -%}opts.icon.options{%- else -%} opts {%- endif -%}, style)\n {% endif %}\n return new L.{{this.marker._name}}(latlng, opts)\n }\n {%- endif %}\n\n function {{this.get_name()}}_onEachFeature(feature, layer) {\n layer.on({\n {%- if this.highlight %}\n mouseout: function(e) {\n if(typeof e.target.setStyle === \"function\"){\n {{ this.get_name() }}.resetStyle(e.target);\n }\n },\n mouseover: function(e) {\n if(typeof e.target.setStyle === \"function\"){\n const highlightStyle = {{ this.get_name() }}_highlighter(e.target.feature)\n e.target.setStyle(highlightStyle);\n }\n },\n {%- endif %}\n {%- if this.zoom_on_click %}\n click: function(e) {\n if (typeof e.target.getBounds === 'function') {\n {{ this.parent_map.get_name() }}.fitBounds(e.target.getBounds());\n }\n else if (typeof e.target.getLatLng === 'function'){\n let zoom = {{ this.parent_map.get_name() }}.getZoom()\n zoom = zoom > 12 ? 
zoom : zoom + 1\n {{ this.parent_map.get_name() }}.flyTo(e.target.getLatLng(), zoom)\n }\n }\n {%- endif %}\n });\n };\n var {{ this.get_name() }} = L.geoJson(null, {\n {%- if this.smooth_factor is not none %}\n smoothFactor: {{ this.smooth_factor|tojson }},\n {%- endif %}\n onEachFeature: {{ this.get_name() }}_onEachFeature,\n {% if this.style %}\n style: {{ this.get_name() }}_styler,\n {%- endif %}\n {%- if this.marker %}\n pointToLayer: {{ this.get_name() }}_pointToLayer\n {%- endif %}\n });\n\n function {{ this.get_name() }}_add (data) {\n {{ this.get_name() }}\n .addData(data)\n .addTo({{ this._parent.get_name() }});\n }\n {%- if this.embed %}\n {{ this.get_name() }}_add({{ this.data|tojson }});\n {%- else %}\n $.ajax({{ this.embed_link|tojson }}, {dataType: 'json', async: false})\n .done({{ this.get_name() }}_add);\n {%- endif %}\n\n {% endmacro %}\n \"\"\") # noqa\n\n def __init__(self, data, style_function=None, highlight_function=None, # noqa\n name=None, overlay=True, control=True, show=True,\n smooth_factor=None, tooltip=None, embed=True, popup=None,\n zoom_on_click=False, marker=None):\n super(GeoJson, self).__init__(name=name, overlay=overlay,\n control=control, show=show)\n self._name = 'GeoJson'\n self.embed = embed\n self.embed_link = None\n self.json = None\n self.parent_map = None\n self.smooth_factor = smooth_factor\n self.style = style_function is not None\n self.highlight = highlight_function is not None\n self.zoom_on_click = zoom_on_click\n if marker:\n if not isinstance(marker, (Circle, CircleMarker, Marker)):\n raise TypeError(\"Only Marker, Circle, and CircleMarker are supported as GeoJson marker types.\")\n self.marker = marker\n\n self.data = self.process_data(data)\n\n if self.style or self.highlight:\n self.convert_to_feature_collection()\n if self.style:\n self._validate_function(style_function, 'style_function')\n self.style_function = style_function\n self.style_map = {}\n if self.highlight:\n self._validate_function(highlight_function, 'highlight_function')\n self.highlight_function = highlight_function\n self.highlight_map = {}\n self.feature_identifier = self.find_identifier()\n\n if isinstance(tooltip, (GeoJsonTooltip, Tooltip)):\n self.add_child(tooltip)\n elif tooltip is not None:\n self.add_child(Tooltip(tooltip))\n if isinstance(popup, (GeoJsonPopup)):\n self.add_child(popup)\n\n def process_data(self, data):\n \"\"\"Convert an unknown data input into a geojson dictionary.\"\"\"\n if isinstance(data, dict):\n self.embed = True\n return data\n elif isinstance(data, str):\n if data.lower().startswith(('http:', 'ftp:', 'https:')):\n if not self.embed:\n self.embed_link = data\n return requests.get(data).json()\n elif data.lstrip()[0] in '[{': # This is a GeoJSON inline string\n self.embed = True\n return json.loads(data)\n else: # This is a filename\n if not self.embed:\n self.embed_link = data\n with open(data) as f:\n return json.loads(f.read())\n elif hasattr(data, '__geo_interface__'):\n self.embed = True\n if hasattr(data, 'to_crs'):\n data = data.to_crs('EPSG:4326')\n return json.loads(json.dumps(data.__geo_interface__))\n else:\n raise ValueError('Cannot render objects with any missing geometries'\n ': {!r}'.format(data))\n\n def convert_to_feature_collection(self):\n \"\"\"Convert data into a FeatureCollection if it is not already.\"\"\"\n if self.data['type'] == 'FeatureCollection':\n return\n if not self.embed:\n raise ValueError(\n 'Data is not a FeatureCollection, but it should be to apply '\n 'style or highlight. 
Because `embed=False` it cannot be '\n 'converted into one.\\nEither change your geojson data to a '\n 'FeatureCollection, set `embed=True` or disable styling.')\n # Catch case when GeoJSON is just a single Feature or a geometry.\n if 'geometry' not in self.data.keys():\n # Catch case when GeoJSON is just a geometry.\n self.data = {'type': 'Feature', 'geometry': self.data}\n self.data = {'type': 'FeatureCollection', 'features': [self.data]}\n\n def _validate_function(self, func, name):\n \"\"\"\n Tests `self.style_function` and `self.highlight_function` to ensure\n they are functions returning dictionaries.\n \"\"\"\n test_feature = self.data['features'][0]\n if not callable(func) or not isinstance(func(test_feature), dict):\n raise ValueError('{} should be a function that accepts items from '\n 'data[\\'features\\'] and returns a dictionary.'\n .format(name))\n\n def find_identifier(self):\n \"\"\"Find a unique identifier for each feature, create it if needed.\n\n According to the GeoJSON specs a feature:\n - MAY have an 'id' field with a string or numerical value.\n - MUST have a 'properties' field. The content can be any json object\n or even null.\n\n \"\"\"\n feats = self.data['features']\n # Each feature has an 'id' field with a unique value.\n unique_ids = set(feat.get('id', None) for feat in feats)\n if None not in unique_ids and len(unique_ids) == len(feats):\n return 'feature.id'\n # Each feature has a unique string or int property.\n if all(isinstance(feat.get('properties', None), dict) for feat in feats):\n for key in feats[0]['properties']:\n unique_values = set(\n feat['properties'].get(key, None) for feat in feats\n if isinstance(feat['properties'].get(key, None), (str, int))\n )\n if len(unique_values) == len(feats):\n return 'feature.properties.{}'.format(key)\n # We add an 'id' field with a unique value to the data.\n if self.embed:\n for i, feature in enumerate(feats):\n feature['id'] = str(i)\n return 'feature.id'\n raise ValueError(\n 'There is no unique identifier for each feature and because '\n '`embed=False` it cannot be added. Consider adding an `id` '\n 'field to your geojson data or set `embed=True`. '\n )\n\n def _get_self_bounds(self):\n \"\"\"\n Computes the bounds of the object itself (not including it's children)\n in the form [[lat_min, lon_min], [lat_max, lon_max]].\n\n \"\"\"\n return get_bounds(self.data, lonlat=True)\n\n def render(self, **kwargs):\n self.parent_map = get_obj_in_upper_tree(self, Map)\n if self.style or self.highlight:\n mapper = GeoJsonStyleMapper(self.data, self.feature_identifier,\n self)\n if self.style:\n self.style_map = mapper.get_style_map(self.style_function)\n if self.highlight:\n self.highlight_map = mapper.get_highlight_map(\n self.highlight_function)\n super(GeoJson, self).render()\n\n\nclass GeoJsonStyleMapper:\n \"\"\"Create dicts that map styling to GeoJson features.\n\n Used in the GeoJson class. 
Users don't have to call this class directly.\n \"\"\"\n\n def __init__(self, data, feature_identifier, geojson_obj):\n self.data = data\n self.feature_identifier = feature_identifier\n self.geojson_obj = geojson_obj\n\n def get_style_map(self, style_function):\n \"\"\"Return a dict that maps style parameters to features.\"\"\"\n return self._create_mapping(style_function, 'style')\n\n def get_highlight_map(self, highlight_function):\n \"\"\"Return a dict that maps highlight parameters to features.\"\"\"\n return self._create_mapping(highlight_function, 'highlight')\n\n def _create_mapping(self, func, switch):\n \"\"\"Internal function to create the mapping.\"\"\"\n mapping = {}\n for feature in self.data['features']:\n content = func(feature)\n if switch == 'style':\n for key, value in content.items():\n if isinstance(value, MacroElement):\n # Make sure objects are rendered:\n if value._parent is None:\n value._parent = self.geojson_obj\n value.render()\n # Replace objects with their Javascript var names:\n content[key] = \"{{'\" + value.get_name() + \"'}}\"\n key = self._to_key(content)\n mapping.setdefault(key, []).append(self.get_feature_id(feature))\n self._set_default_key(mapping)\n return mapping\n\n def get_feature_id(self, feature):\n \"\"\"Return a value identifying the feature.\"\"\"\n fields = self.feature_identifier.split('.')[1:]\n return functools.reduce(operator.getitem, fields, feature)\n\n @staticmethod\n def _to_key(d):\n \"\"\"Convert dict to str and enable Jinja2 template syntax.\"\"\"\n as_str = json.dumps(d, sort_keys=True)\n return as_str.replace('\"{{', '{{').replace('}}\"', '}}')\n\n @staticmethod\n def _set_default_key(mapping):\n \"\"\"Replace the field with the most features with a 'default' field.\"\"\"\n key_longest = sorted([(len(v), k) for k, v in mapping.items()],\n reverse=True)[0][1]\n mapping['default'] = key_longest\n del (mapping[key_longest])\n\n\nclass TopoJson(JSCSSMixin, Layer):\n \"\"\"\n Creates a TopoJson object for plotting into a Map.\n\n Parameters\n ----------\n data: file, dict or str.\n The TopoJSON data you want to plot.\n * If file, then data will be read in the file and fully\n embedded in Leaflet's JavaScript.\n * If dict, then data will be converted to JSON and embedded\n in the JavaScript.\n * If str, then data will be passed to the JavaScript as-is.\n object_path: str\n The path of the desired object into the TopoJson structure.\n Ex: 'objects.myobject'.\n style_function: function, default None\n A function mapping a TopoJson geometry to a style dict.\n name : string, default None\n The name of the Layer, as it will appear in LayerControls\n overlay : bool, default False\n Adds the layer as an optional overlay (True) or the base layer (False).\n control : bool, default True\n Whether the Layer will be included in LayerControls.\n show: bool, default True\n Whether the layer will be shown on opening (only for overlays).\n smooth_factor: float, default None\n How much to simplify the polyline on each zoom level. More means\n better performance and smoother look, and less means more accurate\n representation. Leaflet defaults to 1.0.\n tooltip: GeoJsonTooltip, Tooltip or str, default None\n Display a text when hovering over the object. 
Can utilize the data,\n see folium.GeoJsonTooltip for info on how to do that.\n\n Examples\n --------\n >>> # Providing file that shall be embeded.\n >>> TopoJson(open('foo.json'), 'object.myobject')\n >>> # Providing filename that shall not be embeded.\n >>> TopoJson('foo.json', 'object.myobject')\n >>> # Providing dict.\n >>> TopoJson(json.load(open('foo.json')), 'object.myobject')\n >>> # Providing string.\n >>> TopoJson(open('foo.json').read(), 'object.myobject')\n\n >>> # Provide a style_function that color all states green but Alabama.\n >>> style_function = lambda x: {'fillColor': '#0000ff' if\n ... x['properties']['name']=='Alabama' else\n ... '#00ff00'}\n >>> TopoJson(topo_json, 'object.myobject', style_function=style_function)\n\n \"\"\"\n _template = Template(u\"\"\"\n {% macro script(this, kwargs) %}\n var {{ this.get_name() }}_data = {{ this.data|tojson }};\n var {{ this.get_name() }} = L.geoJson(\n topojson.feature(\n {{ this.get_name() }}_data,\n {{ this.get_name() }}_data.{{ this.object_path }}\n ),\n {\n {%- if this.smooth_factor is not none %}\n smoothFactor: {{ this.smooth_factor|tojson }},\n {%- endif %}\n }\n ).addTo({{ this._parent.get_name() }});\n {{ this.get_name() }}.setStyle(function(feature) {\n return feature.properties.style;\n });\n {% endmacro %}\n \"\"\") # noqa\n\n default_js = [\n ('topojson',\n 'https://cdnjs.cloudflare.com/ajax/libs/topojson/1.6.9/topojson.min.js'),\n ]\n\n def __init__(self, data, object_path, style_function=None,\n name=None, overlay=True, control=True, show=True,\n smooth_factor=None, tooltip=None):\n super(TopoJson, self).__init__(name=name, overlay=overlay,\n control=control, show=show)\n self._name = 'TopoJson'\n\n if 'read' in dir(data):\n self.embed = True\n self.data = json.load(data)\n elif type(data) is dict:\n self.embed = True\n self.data = data\n else:\n self.embed = False\n self.data = data\n\n self.object_path = object_path\n\n if style_function is None:\n def style_function(x):\n return {}\n self.style_function = style_function\n\n self.smooth_factor = smooth_factor\n\n if isinstance(tooltip, (GeoJsonTooltip, Tooltip)):\n self.add_child(tooltip)\n elif tooltip is not None:\n self.add_child(Tooltip(tooltip))\n\n def style_data(self):\n \"\"\"Applies self.style_function to each feature of self.data.\"\"\"\n\n def recursive_get(data, keys):\n if len(keys):\n return recursive_get(data.get(keys[0]), keys[1:])\n else:\n return data\n\n geometries = recursive_get(self.data, self.object_path.split('.'))['geometries'] # noqa\n for feature in geometries:\n feature.setdefault('properties', {}).setdefault('style', {}).update(self.style_function(feature)) # noqa\n\n def render(self, **kwargs):\n \"\"\"Renders the HTML representation of the element.\"\"\"\n self.style_data()\n super(TopoJson, self).render(**kwargs)\n\n def get_bounds(self):\n \"\"\"\n Computes the bounds of the object itself (not including it's children)\n in the form [[lat_min, lon_min], [lat_max, lon_max]]\n\n \"\"\"\n if not self.embed:\n raise ValueError('Cannot compute bounds of non-embedded TopoJSON.')\n\n xmin, xmax, ymin, ymax = None, None, None, None\n\n for arc in self.data['arcs']:\n x, y = 0, 0\n for dx, dy in arc:\n x += dx\n y += dy\n xmin = none_min(x, xmin)\n xmax = none_max(x, xmax)\n ymin = none_min(y, ymin)\n ymax = none_max(y, ymax)\n return [\n [\n self.data['transform']['translate'][1] + self.data['transform']['scale'][1] * ymin, # noqa\n self.data['transform']['translate'][0] + self.data['transform']['scale'][0] * xmin # noqa\n ],\n [\n 
self.data['transform']['translate'][1] + self.data['transform']['scale'][1] * ymax, # noqa\n self.data['transform']['translate'][0] + self.data['transform']['scale'][0] * xmax # noqa\n ]\n ]\n\n\nclass GeoJsonDetail(MacroElement):\n\n \"\"\"\n Base class for GeoJsonTooltip and GeoJsonPopup to inherit methods and\n template structure from. Not for direct usage.\n\n \"\"\"\n base_template = u\"\"\"\n function(layer){\n let div = L.DomUtil.create('div');\n {% if this.fields %}\n let handleObject = feature=>typeof(feature)=='object' ? JSON.stringify(feature) : feature;\n let fields = {{ this.fields | tojson | safe }};\n let aliases = {{ this.aliases | tojson | safe }};\n let table = '<table>' +\n String(\n fields.map(\n (v,i)=>\n `<tr>{% if this.labels %}\n <th>${aliases[i]{% if this.localize %}.toLocaleString(){% endif %}}</th>\n {% endif %}\n <td>${handleObject(layer.feature.properties[v]){% if this.localize %}.toLocaleString(){% endif %}}</td>\n </tr>`).join(''))\n +'</table>';\n div.innerHTML=table;\n {% endif %}\n return div\n }\n \"\"\"\n\n def __init__(self, fields, aliases=None, labels=True, localize=False, style=None,\n class_name=\"geojsondetail\"):\n super(GeoJsonDetail, self).__init__()\n assert isinstance(fields, (list, tuple)), 'Please pass a list or ' \\\n 'tuple to fields.'\n if aliases is not None:\n assert isinstance(aliases, (list, tuple))\n assert len(fields) == len(aliases), 'fields and aliases must have' \\\n ' the same length.'\n assert isinstance(labels, bool), 'labels requires a boolean value.'\n assert isinstance(localize, bool), 'localize must be bool.'\n self._name = \"GeoJsonDetail\"\n self.fields = fields\n self.aliases = aliases if aliases is not None else fields\n self.labels = labels\n self.localize = localize\n self.class_name = class_name\n if style:\n assert isinstance(style, str), \\\n 'Pass a valid inline HTML style property string to style.'\n # noqa outside of type checking.\n self.style = style\n\n def warn_for_geometry_collections(self):\n \"\"\"Checks for GeoJson GeometryCollection features to warn user about incompatibility.\"\"\"\n geom_collections = [\n feature.get('properties') if feature.get('properties') is not None else key\n for key, feature in enumerate(self._parent.data['features'])\n if feature['geometry']['type'] == 'GeometryCollection'\n ]\n if any(geom_collections):\n warnings.warn(\n \"{} is not configured to render for GeoJson GeometryCollection geometries. \"\n \"Please consider reworking these features: {} to MultiPolygon for full functionality.\\n\"\n \"https://tools.ietf.org/html/rfc7946#page-9\".format(self._name, geom_collections), UserWarning)\n\n def render(self, **kwargs):\n \"\"\"Renders the HTML representation of the element.\"\"\"\n figure = self.get_root()\n if isinstance(self._parent, GeoJson):\n keys = tuple(self._parent.data['features'][0]['properties'].keys())\n self.warn_for_geometry_collections()\n elif isinstance(self._parent, TopoJson):\n obj_name = self._parent.object_path.split('.')[-1]\n keys = tuple(self._parent.data['objects'][obj_name][\n 'geometries'][0]['properties'].keys())\n else:\n raise TypeError('You cannot add a {} to anything other than a '\n 'GeoJson or TopoJson object.'.format(self._name))\n keys = tuple(x for x in keys if x not in ('style', 'highlight'))\n for value in self.fields:\n assert value in keys, ('The field {} is not available in the data. 
'\n 'Choose from: {}.'.format(value, keys))\n figure.header.add_child(Element(\n Template(u\"\"\"\n <style>\n .{{ this.class_name }} {\n {{ this.style }}\n }\n .{{ this.class_name }} table{\n margin: auto;\n }\n .{{ this.class_name }} tr{\n text-align: left;\n }\n .{{ this.class_name }} th{\n padding: 2px; padding-right: 8px;\n }\n </style>\n \"\"\").render(this=self)), name=self.get_name() + \"tablestyle\"\n )\n\n super(GeoJsonDetail, self).render()\n\n\nclass GeoJsonTooltip(GeoJsonDetail):\n \"\"\"\n Create a tooltip that uses data from either geojson or topojson.\n\n Parameters\n ----------\n fields: list or tuple.\n Labels of GeoJson/TopoJson 'properties' or GeoPandas GeoDataFrame\n columns you'd like to display.\n aliases: list/tuple of strings, same length/order as fields, default None.\n Optional aliases you'd like to display in the tooltip as field name\n instead of the keys of `fields`.\n labels: bool, default True.\n Set to False to disable displaying the field names or aliases.\n localize: bool, default False.\n This will use JavaScript's .toLocaleString() to format 'clean' values\n as strings for the user's location; i.e. 1,000,000.00 comma separators,\n float truncation, etc.\n Available for most of JavaScript's primitive types (any data you'll\n serve into the template).\n style: str, default None.\n HTML inline style properties like font and colors. Will be applied to\n a div with the text in it.\n sticky: bool, default True\n Whether the tooltip should follow the mouse.\n **kwargs: Assorted.\n These values will map directly to the Leaflet Options. More info\n available here: https://leafletjs.com/reference-1.6.0#tooltip\n\n Examples\n --------\n # Provide fields and aliases, with Style.\n >>> GeoJsonTooltip(\n >>> fields=['CNTY_NM', 'census-pop-2015', 'census-md-income-2015'],\n >>> aliases=['County', '2015 Census Population', '2015 Median Income'],\n >>> localize=True,\n >>> style=('background-color: grey; color: white; font-family:'\n >>> 'courier new; font-size: 24px; padding: 10px;')\n >>> )\n # Provide fields, with labels off and fixed tooltip positions.\n >>> GeoJsonTooltip(fields=('CNTY_NM',), labels=False, sticky=False)\n \"\"\"\n _template = Template(u\"\"\"\n {% macro script(this, kwargs) %}\n {{ this._parent.get_name() }}.bindTooltip(\"\"\" + GeoJsonDetail.base_template +\n u\"\"\",{{ this.tooltip_options | tojson | safe }});\n {% endmacro %}\n \"\"\")\n\n def __init__(self, fields, aliases=None, labels=True, localize=False,\n style=None, class_name='foliumtooltip', sticky=True, **kwargs):\n super(GeoJsonTooltip, self).__init__(\n fields=fields, aliases=aliases, labels=labels, localize=localize,\n style=style, class_name=class_name\n )\n self._name = 'GeoJsonTooltip'\n kwargs.update({'sticky': sticky, 'class_name': class_name})\n self.tooltip_options = {\n camelize(key): kwargs[key] for key in kwargs.keys()}\n\n\nclass GeoJsonPopup(GeoJsonDetail):\n \"\"\"\n Create a popup feature to bind to each element of a GeoJson layer based on\n its attributes.\n\n Parameters\n ----------\n fields: list or tuple.\n Labels of GeoJson/TopoJson 'properties' or GeoPandas GeoDataFrame\n columns you'd like to display.\n aliases: list/tuple of strings, same length/order as fields, default None.\n Optional aliases you'd like to display in the tooltip as field name\n instead of the keys of `fields`.\n labels: bool, default True.\n Set to False to disable displaying the field names or aliases.\n localize: bool, default False.\n This will use JavaScript's .toLocaleString() to format 
'clean' values\n as strings for the user's location; i.e. 1,000,000.00 comma separators,\n float truncation, etc.\n *Available for most of JavaScript's primitive types (any data you'll\n serve into the template).\n style: str, default None.\n HTML inline style properties like font and colors. Will be applied to\n a div with the text in it.\n\n Examples\n ---\n gjson = folium.GeoJson(gdf).add_to(m)\n\n folium.features.GeoJsonPopup(fields=['NAME'],\n labels=False\n ).add_to(gjson)\n \"\"\"\n\n _template = Template(u\"\"\"\n {% macro script(this, kwargs) %}\n {{ this._parent.get_name() }}.bindPopup(\"\"\" + GeoJsonDetail.base_template +\n u\"\"\",{{ this.popup_options | tojson | safe }});\n {% endmacro %}\n \"\"\")\n\n def __init__(self, fields=None, aliases=None, labels=True,\n style=\"margin: auto;\", class_name='foliumpopup', localize=True,\n **kwargs):\n super(GeoJsonPopup, self).__init__(\n fields=fields, aliases=aliases, labels=labels, localize=localize,\n class_name=class_name, style=style)\n self._name = \"GeoJsonPopup\"\n kwargs.update({'class_name': self.class_name})\n self.popup_options = {\n camelize(key): value for key, value in kwargs.items()}\n\n\nclass Choropleth(FeatureGroup):\n \"\"\"Apply a GeoJSON overlay to the map.\n\n Plot a GeoJSON overlay on the base map. There is no requirement\n to bind data (passing just a GeoJSON plots a single-color overlay),\n but there is a data binding option to map your columnar data to\n different feature objects with a color scale.\n\n If data is passed as a Pandas DataFrame, the \"columns\" and \"key-on\"\n keywords must be included, the first to indicate which DataFrame\n columns to use, the second to indicate the layer in the GeoJSON\n on which to key the data. The 'columns' keyword does not need to be\n passed for a Pandas series.\n\n Colors are generated from color brewer (http://colorbrewer2.org/)\n sequential palettes. By default, linear binning is used between\n the min and the max of the values. Custom binning can be achieved\n with the `bins` parameter.\n\n TopoJSONs can be passed as \"geo_data\", but the \"topojson\" keyword must\n also be passed with the reference to the topojson objects to convert.\n See the topojson.feature method in the TopoJSON API reference:\n https://github.com/topojson/topojson/wiki/API-Reference\n\n\n Parameters\n ----------\n geo_data: string/object\n URL, file path, or data (json, dict, geopandas, etc) to your GeoJSON\n geometries\n data: Pandas DataFrame or Series, default None\n Data to bind to the GeoJSON.\n columns: dict or tuple, default None\n If the data is a Pandas DataFrame, the columns of data to be bound.\n Must pass column 1 as the key, and column 2 the values.\n key_on: string, default None\n Variable in the `geo_data` GeoJSON file to bind the data to. Must\n start with 'feature' and be in JavaScript objection notation.\n Ex: 'feature.id' or 'feature.properties.statename'.\n bins: int or sequence of scalars or str, default 6\n If `bins` is an int, it defines the number of equal-width\n bins between the min and the max of the values.\n If `bins` is a sequence, it directly defines the bin edges.\n For more information on this parameter, have a look at\n numpy.histogram function.\n fill_color: string, optional\n Area fill color, defaults to blue. 
Can pass a hex code, color name,\n or if you are binding data, one of the following color brewer palettes:\n 'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'RdPu',\n 'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd'.\n nan_fill_color: string, default 'black'\n Area fill color for nan or missing values.\n Can pass a hex code, color name.\n fill_opacity: float, default 0.6\n Area fill opacity, range 0-1.\n nan_fill_opacity: float, default fill_opacity\n Area fill opacity for nan or missing values, range 0-1.\n line_color: string, default 'black'\n GeoJSON geopath line color.\n line_weight: int, default 1\n GeoJSON geopath line weight.\n line_opacity: float, default 1\n GeoJSON geopath line opacity, range 0-1.\n legend_name: string, default empty string\n Title for data legend.\n topojson: string, default None\n If using a TopoJSON, passing \"objects.yourfeature\" to the topojson\n keyword argument will enable conversion to GeoJSON.\n smooth_factor: float, default None\n How much to simplify the polyline on each zoom level. More means\n better performance and smoother look, and less means more accurate\n representation. Leaflet defaults to 1.0.\n highlight: boolean, default False\n Enable highlight functionality when hovering over a GeoJSON area.\n name : string, optional\n The name of the layer, as it will appear in LayerControls\n overlay : bool, default True\n Adds the layer as an optional overlay (True) or the base layer (False).\n control : bool, default True\n Whether the Layer will be included in LayerControls.\n show: bool, default True\n Whether the layer will be shown on opening (only for overlays).\n\n Returns\n -------\n GeoJSON data layer in obj.template_vars\n\n Examples\n --------\n >>> Choropleth(geo_data='us-states.json', line_color='blue',\n ... line_weight=3)\n >>> Choropleth(geo_data='geo.json', data=df,\n ... columns=['Data 1', 'Data 2'],\n ... key_on='feature.properties.myvalue',\n ... fill_color='PuBu',\n ... bins=[0, 20, 30, 40, 50, 60])\n >>> Choropleth(geo_data='countries.json',\n ... topojson='objects.countries')\n >>> Choropleth(geo_data='geo.json', data=df,\n ... columns=['Data 1', 'Data 2'],\n ... key_on='feature.properties.myvalue',\n ... fill_color='PuBu',\n ... bins=[0, 20, 30, 40, 50, 60],\n ... highlight=True)\n \"\"\"\n\n def __init__(self, geo_data, data=None, columns=None, key_on=None, # noqa\n bins=6, fill_color=None, nan_fill_color='black',\n fill_opacity=0.6, nan_fill_opacity=None, line_color='black',\n line_weight=1, line_opacity=1, name=None, legend_name='',\n overlay=True, control=True, show=True,\n topojson=None, smooth_factor=None, highlight=None,\n **kwargs):\n super(Choropleth, self).__init__(name=name, overlay=overlay,\n control=control, show=show)\n self._name = 'Choropleth'\n\n fill_color = fill_color or ('blue' if data is None else 'Blues')\n\n if data is not None and not color_brewer(fill_color):\n raise ValueError('Please pass a valid color brewer code to '\n 'fill_local. 
See docstring for valid codes.')\n\n if nan_fill_opacity is None:\n nan_fill_opacity = fill_opacity\n\n if 'threshold_scale' in kwargs:\n if kwargs['threshold_scale'] is not None:\n bins = kwargs['threshold_scale']\n warnings.warn(\n 'choropleth `threshold_scale` parameter is now depreciated '\n 'in favor of the `bins` parameter.', DeprecationWarning)\n\n # Create color_data dict\n if hasattr(data, 'set_index'):\n # This is a pd.DataFrame\n color_data = data.set_index(columns[0])[columns[1]].to_dict()\n elif hasattr(data, 'to_dict'):\n # This is a pd.Series\n color_data = data.to_dict()\n elif data:\n color_data = dict(data)\n else:\n color_data = None\n\n self.color_scale = None\n\n if color_data is not None and key_on is not None:\n real_values = np.array(list(color_data.values()))\n real_values = real_values[~np.isnan(real_values)]\n _, bin_edges = np.histogram(real_values, bins=bins)\n\n bins_min, bins_max = min(bin_edges), max(bin_edges)\n if np.any((real_values < bins_min) | (real_values > bins_max)):\n raise ValueError(\n 'All values are expected to fall into one of the provided '\n 'bins (or to be Nan). Please check the `bins` parameter '\n 'and/or your data.')\n\n # We add the colorscale\n nb_bins = len(bin_edges) - 1\n color_range = color_brewer(fill_color, n=nb_bins)\n self.color_scale = StepColormap(\n color_range,\n index=bin_edges,\n vmin=bins_min,\n vmax=bins_max,\n caption=legend_name)\n\n # then we 'correct' the last edge for numpy digitize\n # (we add a very small amount to fake an inclusive right interval)\n increasing = bin_edges[0] <= bin_edges[-1]\n bin_edges[-1] = np.nextafter(\n bin_edges[-1],\n (1 if increasing else -1) * np.inf)\n\n key_on = key_on[8:] if key_on.startswith('feature.') else key_on\n\n def get_by_key(obj, key):\n return (obj.get(key, None) if len(key.split('.')) <= 1 else\n get_by_key(obj.get(key.split('.')[0], None),\n '.'.join(key.split('.')[1:])))\n\n def color_scale_fun(x):\n key_of_x = get_by_key(x, key_on)\n if key_of_x is None:\n raise ValueError(\"key_on `{!r}` not found in GeoJSON.\".format(key_on))\n\n if key_of_x not in color_data.keys():\n return nan_fill_color, nan_fill_opacity\n\n value_of_x = color_data[key_of_x]\n if np.isnan(value_of_x):\n return nan_fill_color, nan_fill_opacity\n\n color_idx = np.digitize(value_of_x, bin_edges, right=False) - 1\n return color_range[color_idx], fill_opacity\n\n else:\n def color_scale_fun(x):\n return fill_color, fill_opacity\n\n def style_function(x):\n color, opacity = color_scale_fun(x)\n return {\n 'weight': line_weight,\n 'opacity': line_opacity,\n 'color': line_color,\n 'fillOpacity': opacity,\n 'fillColor': color\n }\n\n def highlight_function(x):\n return {\n 'weight': line_weight + 2,\n 'fillOpacity': fill_opacity + .2\n }\n\n if topojson:\n self.geojson = TopoJson(\n geo_data,\n topojson,\n style_function=style_function,\n smooth_factor=smooth_factor)\n else:\n self.geojson = GeoJson(\n geo_data,\n style_function=style_function,\n smooth_factor=smooth_factor,\n highlight_function=highlight_function if highlight else None)\n\n self.add_child(self.geojson)\n if self.color_scale:\n self.add_child(self.color_scale)\n\n def render(self, **kwargs):\n \"\"\"Render the GeoJson/TopoJson and color scale objects.\"\"\"\n if self.color_scale:\n # ColorMap needs Map as its parent\n assert isinstance(self._parent, Map), ('Choropleth must be added'\n ' to a Map object.')\n self.color_scale._parent = self._parent\n\n super(Choropleth, self).render(**kwargs)\n\n\nclass DivIcon(MacroElement):\n \"\"\"\n 
Represents a lightweight icon for markers that uses a simple `div`\n element instead of an image.\n\n Parameters\n ----------\n icon_size : tuple of 2 int\n Size of the icon image in pixels.\n icon_anchor : tuple of 2 int\n The coordinates of the \"tip\" of the icon\n (relative to its top left corner).\n The icon will be aligned so that this point is at the\n marker's geographical location.\n popup_anchor : tuple of 2 int\n The coordinates of the point from which popups will \"open\",\n relative to the icon anchor.\n class_name : string\n A custom class name to assign to the icon.\n Leaflet defaults is 'leaflet-div-icon' which draws a little white\n square with a shadow. We set it 'empty' in folium.\n html : string\n A custom HTML code to put inside the div element.\n\n See https://leafletjs.com/reference-1.6.0.html#divicon\n\n \"\"\"\n\n _template = Template(u\"\"\"\n {% macro script(this, kwargs) %}\n var {{ this.get_name() }} = L.divIcon({{ this.options|tojson }});\n {{this._parent.get_name()}}.setIcon({{this.get_name()}});\n {% endmacro %}\n \"\"\") # noqa\n\n def __init__(self, html=None, icon_size=None, icon_anchor=None,\n popup_anchor=None, class_name='empty'):\n super(DivIcon, self).__init__()\n self._name = 'DivIcon'\n self.options = parse_options(\n html=html,\n icon_size=icon_size,\n icon_anchor=icon_anchor,\n popup_anchor=popup_anchor,\n class_name=class_name,\n )\n\n\nclass LatLngPopup(MacroElement):\n \"\"\"\n When one clicks on a Map that contains a LatLngPopup,\n a popup is shown that displays the latitude and longitude of the pointer.\n\n \"\"\"\n _template = Template(u\"\"\"\n {% macro script(this, kwargs) %}\n var {{this.get_name()}} = L.popup();\n function latLngPop(e) {\n {{this.get_name()}}\n .setLatLng(e.latlng)\n .setContent(\"Latitude: \" + e.latlng.lat.toFixed(4) +\n \"<br>Longitude: \" + e.latlng.lng.toFixed(4))\n .openOn({{this._parent.get_name()}});\n }\n {{this._parent.get_name()}}.on('click', latLngPop);\n {% endmacro %}\n \"\"\") # noqa\n\n def __init__(self):\n super(LatLngPopup, self).__init__()\n self._name = 'LatLngPopup'\n\n\nclass ClickForMarker(MacroElement):\n \"\"\"\n When one clicks on a Map that contains a ClickForMarker,\n a Marker is created at the pointer's position.\n\n Parameters\n ----------\n popup: str, default None\n Text to display in the markers' popups.\n If None, the popups will display the marker's latitude and longitude.\n\n \"\"\"\n _template = Template(u\"\"\"\n {% macro script(this, kwargs) %}\n function newMarker(e){\n var new_mark = L.marker().setLatLng(e.latlng).addTo({{this._parent.get_name()}});\n new_mark.dragging.enable();\n new_mark.on('dblclick', function(e){ {{this._parent.get_name()}}.removeLayer(e.target)})\n var lat = e.latlng.lat.toFixed(4),\n lng = e.latlng.lng.toFixed(4);\n new_mark.bindPopup({{ this.popup }});\n };\n {{this._parent.get_name()}}.on('click', newMarker);\n {% endmacro %}\n \"\"\") # noqa\n\n def __init__(self, popup=None):\n super(ClickForMarker, self).__init__()\n self._name = 'ClickForMarker'\n\n if popup:\n self.popup = ''.join(['\"', popup, '\"'])\n else:\n self.popup = '\"Latitude: \" + lat + \"<br>Longitude: \" + lng '\n\n\nclass CustomIcon(Icon):\n \"\"\"\n Create a custom icon, based on an image.\n\n Parameters\n ----------\n icon_image : string, file or array-like object\n The data you want to use as an icon.\n * If string, it will be written directly in the output file.\n * If file, it's content will be converted as embedded in the\n output file.\n * If array-like, it will be converted to 
PNG base64 string\n and embedded in the output.\n icon_size : tuple of 2 int, optional\n Size of the icon image in pixels.\n icon_anchor : tuple of 2 int, optional\n The coordinates of the \"tip\" of the icon\n (relative to its top left corner).\n The icon will be aligned so that this point is at the\n marker's geographical location.\n shadow_image : string, file or array-like object, optional\n The data for the shadow image. If not specified,\n no shadow image will be created.\n shadow_size : tuple of 2 int, optional\n Size of the shadow image in pixels.\n shadow_anchor : tuple of 2 int, optional\n The coordinates of the \"tip\" of the shadow relative to its\n top left corner (the same as icon_anchor if not specified).\n popup_anchor : tuple of 2 int, optional\n The coordinates of the point from which popups will \"open\",\n relative to the icon anchor.\n\n \"\"\"\n _template = Template(u\"\"\"\n {% macro script(this, kwargs) %}\n var {{ this.get_name() }} = L.icon({{ this.options|tojson }});\n {{ this._parent.get_name() }}.setIcon({{ this.get_name() }});\n {% endmacro %}\n \"\"\") # noqa\n\n def __init__(self, icon_image, icon_size=None, icon_anchor=None,\n shadow_image=None, shadow_size=None, shadow_anchor=None,\n popup_anchor=None):\n super(Icon, self).__init__()\n self._name = 'CustomIcon'\n self.options = parse_options(\n icon_url=image_to_url(icon_image),\n icon_size=icon_size,\n icon_anchor=icon_anchor,\n shadow_url=shadow_image and image_to_url(shadow_image),\n shadow_size=shadow_size,\n shadow_anchor=shadow_anchor,\n popup_anchor=popup_anchor,\n )\n\n\nclass ColorLine(FeatureGroup):\n \"\"\"\n Draw data on a map with specified colors.\n\n Parameters\n ----------\n positions: tuple or list\n The list of points latitude and longitude\n colors: tuple or list\n The list of segments colors.\n It must have length equal to `len(positions)-1`.\n colormap: branca.colormap.Colormap or list or tuple\n The colormap to use. If a list or tuple of colors is provided,\n a LinearColormap will be created from it.\n nb_steps: int, default 12\n To have lighter output the colormap will be discretized\n to that number of colors.\n opacity: float, default 1\n Line opacity, scale 0-1\n weight: int, default 2\n Stroke weight in pixels\n **kwargs\n Further parameters available. See folium.map.FeatureGroup\n\n Returns\n -------\n A ColorLine object that you can `add_to` a Map.\n\n \"\"\"\n\n def __init__(self, positions, colors, colormap=None, nb_steps=12,\n weight=None, opacity=None, **kwargs):\n super(ColorLine, self).__init__(**kwargs)\n self._name = 'ColorLine'\n positions = validate_locations(positions)\n\n if colormap is None:\n cm = LinearColormap(['green', 'yellow', 'red'],\n vmin=min(colors),\n vmax=max(colors),\n ).to_step(nb_steps)\n elif isinstance(colormap, LinearColormap):\n cm = colormap.to_step(nb_steps)\n elif isinstance(colormap, list) or isinstance(colormap, tuple):\n cm = LinearColormap(colormap,\n vmin=min(colors),\n vmax=max(colors),\n ).to_step(nb_steps)\n else:\n cm = colormap\n out = {}\n for (lat1, lng1), (lat2, lng2), color in zip(positions[:-1], positions[1:], colors): # noqa\n out.setdefault(cm(color), []).append([[lat1, lng1], [lat2, lng2]])\n for key, val in out.items():\n self.add_child(PolyLine(val, color=key, weight=weight, opacity=opacity)) # noqa\n" ]
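The folium source captured in this record documents its public layers mainly through docstrings (GeoJson with a style_function and tooltip, GeoJsonTooltip, Choropleth with data binding). As a hedged illustration only, a minimal Python sketch of that API follows; the file name 'us_states.json', the DataFrame columns 'state' and 'value', and the property path 'feature.properties.name' are hypothetical placeholders, not taken from the record.

    import folium
    import pandas as pd

    # Hypothetical tabular data: one row per state, a numeric value to color by.
    df = pd.DataFrame({'state': ['Alabama', 'Alaska'], 'value': [4.9, 0.7]})

    m = folium.Map(location=[48, -102], zoom_start=3)

    # Choropleth (defined above) builds a GeoJson layer plus a StepColormap legend.
    folium.Choropleth(
        geo_data='us_states.json',            # hypothetical GeoJSON file
        data=df,
        columns=['state', 'value'],
        key_on='feature.properties.name',     # hypothetical feature property
        fill_color='YlGn',
        legend_name='Value',
    ).add_to(m)

    # Plain GeoJson layer with a style_function and a GeoJsonTooltip,
    # mirroring the docstring examples in the code above.
    folium.GeoJson(
        'us_states.json',
        style_function=lambda feature: {'fillColor': '#00ff00', 'weight': 1},
        tooltip=folium.GeoJsonTooltip(fields=['name'], aliases=['State'], localize=True),
    ).add_to(m)

    m.save('map.html')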
[ [ "numpy.dot", "numpy.sqrt", "numpy.asarray", "numpy.iinfo", "scipy.sparse.dia_matrix", "scipy.sparse.issparse", "scipy.linalg.lstsq", "numpy.full", "numpy.ravel", "numpy.zeros", "numpy.ascontiguousarray", "scipy.sparse.csr_matrix", "scipy.sparse.linalg.LinearOperator", "numpy.find_common_type", "numpy.sum", "scipy.sparse.isspmatrix", "scipy.special.expit", "scipy.optimize.nnls", "numpy.empty", "numpy.ones", "numpy.average", "numpy.vstack" ], [ "sklearn.ensemble.RandomForestRegressor", "sklearn.datasets.make_classification", "sklearn.inspection._partial_dependence._grid_from_X", "sklearn.cluster.KMeans", "numpy.asarray", "sklearn.ensemble.HistGradientBoostingClassifier", "sklearn.preprocessing.PolynomialFeatures", "sklearn.base.clone", "numpy.mean", "sklearn.inspection._partial_dependence._partial_dependence_brute", "numpy.iinfo", "sklearn.utils._testing.assert_allclose", "numpy.allclose", "sklearn.ensemble.GradientBoostingRegressor", "sklearn.utils._testing.assert_array_equal", "sklearn.ensemble.HistGradientBoostingRegressor", "sklearn.dummy.DummyClassifier", "sklearn.tree.tests.test_tree.assert_is_subtree", "sklearn.datasets.load_iris", "sklearn.inspection.partial_dependence", "sklearn.inspection._partial_dependence._partial_dependence_recursion", "sklearn.utils.validation.check_random_state", "numpy.testing.assert_allclose", "sklearn.ensemble.GradientBoostingClassifier", "numpy.corrcoef", "numpy.array", "numpy.random.RandomState", "sklearn.preprocessing.scale", "sklearn.tree.DecisionTreeRegressor", "sklearn.linear_model.LogisticRegression", "sklearn.pipeline.make_pipeline", "sklearn.preprocessing.RobustScaler", "numpy.ones", "sklearn.datasets.make_regression", "sklearn.linear_model.LinearRegression", "sklearn.preprocessing.StandardScaler" ], [ "numpy.log", "numpy.abs", "numpy.power", "numpy.ones", "numpy.finfo", "numpy.copy", "numpy.argmax", "scipy.special.xlogy", "numpy.any", "numpy.average", "numpy.exp", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.vstack" ], [ "numpy.log", "numpy.log2", "scipy.sparse.issparse", "numpy.sqrt", "numpy.unique", "numpy.asarray", "numpy.reshape", "numpy.ascontiguousarray", "numpy.atleast_1d", "numpy.copy", "numpy.argmax", "numpy.any", "numpy.iinfo", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.isnan", "numpy.any", "numpy.digitize", "numpy.histogram", "numpy.nextafter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zysilence/tensorforce
[ "7539e5dde66f3a93b881006f9b7f38c926ced21b", "7539e5dde66f3a93b881006f9b7f38c926ced21b", "7539e5dde66f3a93b881006f9b7f38c926ced21b", "3c3b9c3ac153761016cf9883b76613c9d93952bf" ]
[ "tensorforce/core/memories/latest.py", "tensorforce/core/memories/queue.py", "tensorforce/contrib/deepmind_lab.py", "examples/threaded_ale.py" ]
[ "# Copyright 2017 reinforce.io. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\n\nfrom tensorforce.core.memories import Queue\n\n\nclass Latest(Queue):\n \"\"\"\n Memory which always retrieves most recent experiences.\n \"\"\"\n\n def __init__(self, states, internals, actions, include_next_states, capacity, scope='latest', summary_labels=None):\n \"\"\"\n Latest memory.\n\n Args:\n states: States specifiction.\n internals: Internal states specification.\n actions: Actions specification.\n include_next_states: Include subsequent state if true.\n capacity: Memory capacity.\n \"\"\"\n super(Latest, self).__init__(\n states=states,\n internals=internals,\n actions=actions,\n include_next_states=include_next_states,\n capacity=capacity,\n scope=scope,\n summary_labels=summary_labels\n )\n\n def tf_retrieve_timesteps(self, n):\n num_timesteps = (self.memory_index - self.episode_indices[-1] - 2) % self.capacity + 1\n n = tf.minimum(x=n, y=num_timesteps)\n indices = tf.range(\n start=(self.memory_index - n),\n limit=self.memory_index\n ) % self.capacity\n return self.retrieve_indices(indices=indices)\n\n def tf_retrieve_episodes(self, n):\n n = tf.minimum(x=n, y=self.episode_count)\n start = self.episode_indices[self.episode_count - n - 1] + 1\n limit = self.episode_indices[self.episode_count - 1] + 1\n limit += tf.where(condition=(start < limit), x=0, y=self.capacity)\n indices = tf.range(start=start, limit=limit) % self.capacity\n return self.retrieve_indices(indices=indices)\n\n def tf_retrieve_sequences(self, n, sequence_length):\n # Remove once #128 is resolved\n tf.logging.warn(\"Sampling sequences is not validated yet. 
Use timesteps or episodes instead.\")\n num_sequences = (self.memory_index - self.episode_indices[-1] - 2 - sequence_length + 1) % self.capacity + 1\n n = tf.minimum(x=n, y=num_sequences)\n indices = tf.range(\n start=(self.memory_index - n - sequence_length), # or '- 1' implied in sequence length?\n limit=self.memory_index\n ) % self.capacity\n # sequence_indices = [tf.range(start=indices[n], limit=(indices[n] + sequence_length)) for k in range(n)]\n # sequence_indices = [indices[k: k + sequence_length] for k in tf.unstack(value=tf.range(start=0, limit=n), num=n)]\n sequence_indices = tf.expand_dims(input=tf.range(start=0, limit=n), axis=1) + tf.expand_dims(input=tf.constant(value=list(range(sequence_length))), axis=0)\n sequence_indices = tf.reshape(tensor=sequence_indices, shape=(n * sequence_length,))\n # sequence_indices = tf.concat(values=sequence_indices, axis=0) # tf.stack !!!!!\n terminal = tf.gather(params=self.terminal_memory, indices=indices)\n sequence_indices = tf.boolean_mask(tensor=sequence_indices, mask=tf.logical_not(x=terminal))\n return self.retrieve_indices(indices=sequence_indices)\n", "# Copyright 2017 reinforce.io. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\n\nfrom tensorforce import util\nfrom tensorforce.core.memories import Memory\n\n\nclass Queue(Memory):\n \"\"\"\n Base class for memories organized as a queue (FIFO).\n \"\"\"\n\n def __init__(self, states, internals, actions, include_next_states, capacity, scope='queue', summary_labels=None):\n \"\"\"\n Queue memory.\n\n Args:\n capacity: Memory capacity.\n \"\"\"\n self.capacity = capacity\n self.scope = scope\n\n # Pieces of the records are stored in different tensors:\n self.states_memory = dict() # keys=state space components\n self.internals_memory = dict() # keys=internal state components\n self.actions_memory = dict() # keys=action space components\n self.terminal_memory = None # 1D tensor\n self.reward_memory = None # 1D tensor\n self.memory_index = None # 0D (int) tensor (points to the next record to be overwritten)\n self.episode_indices = None # 1D tensor of indexes where episodes start.\n self.episode_count = None # 0D (int) tensor: How many episodes do we have stored?\n\n self.retrieve_indices = None\n\n super(Queue, self).__init__(\n states=states,\n internals=internals,\n actions=actions,\n include_next_states=include_next_states,\n scope=scope,\n summary_labels=summary_labels\n )\n\n def setup_template_funcs(self, custom_getter=None):\n custom_getter = super(Queue, self).setup_template_funcs(custom_getter=custom_getter)\n\n self.retrieve_indices = tf.make_template(\n name_=(self.scope + '/retrieve_indices'),\n func_=self.tf_retrieve_indices,\n custom_getter_=custom_getter\n )\n\n def tf_initialize(self):\n # States\n for name in 
sorted(self.states_spec):\n state = self.states_spec[name]\n self.states_memory[name] = tf.get_variable(\n name=('state-' + name),\n shape=(self.capacity,) + tuple(state['shape']),\n dtype=util.tf_dtype(state['type']),\n trainable=False\n )\n\n # Internals\n for name in sorted(self.internals_spec):\n internal = self.internals_spec[name]\n self.internals_memory[name] = tf.get_variable(\n name=('internal-' + name),\n shape=(self.capacity,) + tuple(internal['shape']),\n dtype=util.tf_dtype(internal['type']),\n trainable=False\n )\n\n # Actions\n for name in sorted(self.actions_spec):\n action = self.actions_spec[name]\n self.actions_memory[name] = tf.get_variable(\n name=('action-' + name),\n shape=(self.capacity,) + tuple(action['shape']),\n dtype=util.tf_dtype(action['type']),\n trainable=False\n )\n\n # Terminal\n self.terminal_memory = tf.get_variable(\n name='terminal',\n shape=(self.capacity,),\n dtype=util.tf_dtype('bool'),\n initializer=tf.constant_initializer(\n value=False,\n dtype=util.tf_dtype('bool')\n ),\n trainable=False\n )\n\n # Reward\n self.reward_memory = tf.get_variable(\n name='reward',\n shape=(self.capacity,),\n dtype=util.tf_dtype('float'),\n trainable=False\n )\n\n # Memory index\n self.memory_index = tf.get_variable(\n name='memory-index',\n dtype=util.tf_dtype('int'),\n initializer=0,\n trainable=False\n )\n\n # Episode indices\n self.episode_indices = tf.get_variable(\n name='episode-indices',\n shape=(self.capacity + 1,),\n dtype=util.tf_dtype('int'),\n initializer=tf.constant_initializer(value=(self.capacity - 1), dtype=util.tf_dtype('int')),\n trainable=False\n )\n\n # Episodes index\n self.episode_count = tf.get_variable(\n name='episode-count',\n dtype=util.tf_dtype('int'),\n initializer=0,\n trainable=False\n )\n\n def tf_store(self, states, internals, actions, terminal, reward):\n # Memory indices to overwrite.\n num_instances = tf.shape(input=terminal)[0]\n with tf.control_dependencies([tf.assert_less_equal(num_instances, self.capacity)]):\n indices = tf.range(self.memory_index, self.memory_index + num_instances) % self.capacity\n\n # Remove episode indices.\n num_episodes = tf.count_nonzero(\n input_tensor=tf.gather(params=self.terminal_memory, indices=indices),\n axis=0,\n dtype=util.tf_dtype('int')\n )\n num_episodes = tf.minimum(x=num_episodes, y=self.episode_count)\n assignment = tf.assign(\n ref=self.episode_indices[:self.episode_count - num_episodes],\n value=self.episode_indices[num_episodes: self.episode_count]\n )\n\n # Decrement episode count.\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignment = tf.assign_sub(ref=self.episode_count, value=num_episodes)\n\n # Assign new observations.\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignments = list()\n for name in sorted(states):\n assignments.append(tf.scatter_update(\n ref=self.states_memory[name],\n indices=indices,\n updates=states[name]\n ))\n for name in sorted(internals):\n assignments.append(tf.scatter_update(\n ref=self.internals_memory[name],\n indices=indices,\n updates=internals[name]\n ))\n for name in sorted(actions):\n assignments.append(tf.scatter_update(\n ref=self.actions_memory[name],\n indices=indices,\n updates=actions[name]\n ))\n assignments.append(tf.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal))\n assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward))\n\n # Add episode indices.\n with tf.control_dependencies(control_inputs=assignments):\n num_episodes = 
tf.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int'))\n assignment = tf.assign(\n ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes],\n value=tf.boolean_mask(tensor=indices, mask=terminal)\n )\n\n # Increment episode count.\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignment = tf.assign_add(ref=self.episode_count, value=num_episodes)\n\n # Increment memory index.\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignment = tf.assign(\n ref=self.episode_indices[-1],\n value=tf.where(self.memory_index + num_instances > self.capacity,\n self.episode_indices[self.episode_count - 1], self.capacity - 1)\n )\n\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignment = tf.assign(ref=self.memory_index, value=((self.memory_index + num_instances) % self.capacity))\n\n with tf.control_dependencies(control_inputs=(assignment,)):\n return tf.no_op()\n\n def tf_retrieve_indices(self, indices):\n \"\"\"\n Fetches experiences for given indices.\n\n Args:\n indices: Index tensor\n\n Returns: Batch of experiences\n \"\"\"\n states = dict()\n for name in sorted(self.states_memory):\n states[name] = tf.gather(params=self.states_memory[name], indices=indices)\n\n internals = dict()\n for name in sorted(self.internals_memory):\n internals[name] = tf.gather(params=self.internals_memory[name], indices=indices)\n\n actions = dict()\n for name in sorted(self.actions_memory):\n actions[name] = tf.gather(params=self.actions_memory[name], indices=indices)\n\n terminal = tf.gather(params=self.terminal_memory, indices=indices)\n reward = tf.gather(params=self.reward_memory, indices=indices)\n\n if self.include_next_states:\n assert util.rank(indices) == 1\n next_indices = (indices + 1) % self.capacity\n\n next_states = dict()\n for name in sorted(self.states_memory):\n next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)\n\n next_internals = dict()\n for name in sorted(self.internals_memory):\n next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)\n\n return dict(\n states=states,\n internals=internals,\n actions=actions,\n terminal=terminal,\n reward=reward,\n next_states=next_states,\n next_internals=next_internals\n )\n else:\n return dict(\n states=states,\n internals=internals,\n actions=actions,\n terminal=terminal,\n reward=reward\n )\n", "# Copyright 2017 reinforce.io. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport deepmind_lab\nfrom tensorforce.environments.environment import Environment\n\n\n# TODO this has not been tested since 0.3 - potentially deprecated API\nclass DeepMindLab(Environment):\n \"\"\"\n DeepMind Lab Integration:\n https://arxiv.org/abs/1612.03801\n https://github.com/deepmind/lab\n\n Since DeepMind lab is only available as source code, a manual install\n via bazel is required. Further, due to the way bazel handles external\n dependencies, cloning TensorForce into lab is the most convenient way to\n run it using the bazel BUILD file we provide. To use lab, first download\n and install it according to instructions\n <https://github.com/deepmind/lab/blob/master/docs/build.md>:\n\n ```bash\n git clone https://github.com/deepmind/lab.git\n ```\n\n Add to the lab main BUILD file:\n\n ```\n package(default_visibility = [\"//visibility:public\"])\n ```\n\n Clone TensorForce into the lab directory, then run the TensorForce bazel runner.\n\n Note that using any specific configuration file currently requires changing the Tensorforce\n BUILD file to adjust environment parameters.\n\n ```bash\n bazel run //tensorforce:lab_runner\n ```\n\n Please note that we have not tried to reproduce any lab results yet, and\n these instructions just explain connectivity in case someone wants to\n get started there.\n\n\n \"\"\"\n\n def __init__(\n self,\n level_id,\n repeat_action=1,\n state_attribute='RGB_INTERLACED',\n settings={'width': '320', 'height': '240', 'fps': '60', 'appendCommand': ''}\n ):\n \"\"\"\n Initialize DeepMind Lab environment.\n\n Args:\n level_id: string with id/descriptor of the level, e.g. 'seekavoid_arena_01'.\n repeat_action: number of frames the environment is advanced, executing the given action during every frame.\n state_attribute: Attributes which represents the state for this environment, should adhere to the\n specification given in DeepMindLabEnvironment.state_spec(level_id).\n settings: dict specifying additional settings as key-value string pairs. 
The following options\n are recognized: 'width' (horizontal resolution of the observation frames), 'height'\n (vertical resolution of the observation frames), 'fps' (frames per second) and 'appendCommand'\n (commands for the internal Quake console).\n\n \"\"\"\n self.level_id = level_id\n self.level = deepmind_lab.Lab(level=level_id, observations=[state_attribute], config=settings)\n self.repeat_action = repeat_action\n self.state_attribute = state_attribute\n\n def __str__(self):\n return 'DeepMindLab({})'.format(self.level_id)\n\n def close(self):\n \"\"\"\n Closes the environment and releases the underlying Quake III Arena instance.\n No other method calls possible afterwards.\n \"\"\"\n self.level.close()\n self.level = None\n\n def reset(self):\n \"\"\"\n Resets the environment to its initialization state. This method needs to be called to start a\n new episode after the last episode ended.\n\n :return: initial state\n \"\"\"\n self.level.reset() # optional: episode=-1, seed=None\n return self.level.observations()[self.state_attribute]\n\n def execute(self, action):\n \"\"\"\n Pass action to universe environment, return reward, next step, terminal state and\n additional info.\n\n :param action: action to execute as numpy array, should have dtype np.intc and should adhere to\n the specification given in DeepMindLabEnvironment.action_spec(level_id)\n :return: dict containing the next state, the reward, and a boolean indicating if the\n next state is a terminal state\n \"\"\"\n adjusted_action = list()\n for action_spec in self.level.action_spec():\n if action_spec['min'] == -1 and action_spec['max'] == 1:\n adjusted_action.append(action[action_spec['name']] - 1)\n else:\n adjusted_action.append(action[action_spec['name']]) # clip?\n action = np.array(adjusted_action, dtype=np.intc)\n\n reward = self.level.step(action=action, num_steps=self.repeat_action)\n state = self.level.observations()['RGB_INTERLACED']\n terminal = not self.level.is_running()\n return state, terminal, reward\n\n @property\n def states(self):\n states = dict()\n\n for state in self.level.observation_spec():\n state_type = state['dtype']\n\n if state_type == np.uint8:\n state_type = np.float32\n\n if state['name'] == self.state_attribute:\n return dict(shape=state['shape'], type=state_type)\n\n return states\n\n @property\n def actions(self):\n actions = dict()\n for action in self.level.action_spec():\n if action['min'] == -1 and action['max'] == 1:\n actions[action['name']] = dict(type='int', num_actions=3)\n else:\n actions[action['name']] = dict(type='float', min_value=action['min'], max_value=action['max'])\n return actions\n\n @property\n def num_steps(self):\n \"\"\"\n Number of frames since the last reset() call.\n \"\"\"\n return self.level.num_steps()\n\n @property\n def fps(self):\n \"\"\"\n An advisory metric that correlates discrete environment steps (\"frames\") with real\n (wallclock) time: the number of frames per (real) second.\n \"\"\"\n return self.level.fps()\n", "# Copyright 2017 reinforce.io. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"\nArcade Learning Environment execution.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom copy import deepcopy\n\nfrom six.moves import xrange\nimport argparse\nimport logging\nimport os\nimport sys\nimport time\nimport numpy as np\n\nfrom tensorforce import TensorForceError\nfrom tensorforce.agents import agents as AgentsDictionary, Agent\nimport json\nfrom tensorforce.execution import ThreadedRunner\nfrom tensorforce.contrib.ale import ALE\nfrom tensorforce.execution.threaded_runner import WorkerAgentGenerator\n\n\"\"\"\nTo replicate the Asynchronous Methods for Deep Reinforcement Learning paper (https://arxiv.org/abs/1602.01783)\nNstep DQN:\n python threaded_ale.py breakout.bin -a configs/dqn_visual.json -n \n configs/cnn_dqn2013_network.json -fs 4 -ea -w 4\n\n\n Note: batch_size in the config should be set to n+1 where n is the desired number of steps\n\"\"\"\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('rom', help=\"File path of the rom\")\n parser.add_argument('-a', '--agent-config', help=\"Agent configuration file\")\n parser.add_argument('-n', '--network-spec', default=None, help=\"Network specification file\")\n parser.add_argument('-w', '--workers', help=\"Number of threads to run where the model is shared\", type=int, default=16)\n parser.add_argument('-fs', '--frame-skip', help=\"Number of frames to repeat action\", type=int, default=1)\n parser.add_argument('-rap', '--repeat-action-probability', help=\"Repeat action probability\", type=float, default=0.0)\n parser.add_argument('-lolt', '--loss-of-life-termination', help=\"Loss of life counts as terminal state\", action='store_true')\n parser.add_argument('-lolr', '--loss-of-life-reward', help=\"Loss of life reward/penalty. 
EX: -1 to penalize\", type=float, default=0.0)\n parser.add_argument('-ea', '--epsilon-annealing', help='Create separate epislon annealing schedules per thread', action='store_true')\n parser.add_argument('-ds', '--display-screen', action='store_true', default=False, help=\"Display emulator screen\")\n parser.add_argument('-e', '--episodes', type=int, default=50000, help=\"Number of episodes\")\n parser.add_argument('-t', '--max-timesteps', type=int, default=2000, help=\"Maximum number of timesteps per episode\")\n parser.add_argument('-s', '--save', help=\"Save agent to this dir\")\n parser.add_argument('-se', '--save-episodes', type=int, default=100, help=\"Save agent every x episodes\")\n parser.add_argument('-l', '--load', help=\"Load agent from this dir\")\n parser.add_argument('-D', '--debug', action='store_true', default=False, help=\"Show debug outputs\")\n\n args = parser.parse_args()\n\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG) # configurable!!!\n logger.addHandler(logging.StreamHandler(sys.stdout))\n\n environments = [ALE(args.rom, frame_skip=args.frame_skip,\n repeat_action_probability=args.repeat_action_probability,\n loss_of_life_termination=args.loss_of_life_termination,\n loss_of_life_reward=args.loss_of_life_reward,\n display_screen=args.display_screen) for _ in range(args.workers)]\n\n if args.network_spec:\n with open(args.network_spec, 'r') as fp:\n network_spec = json.load(fp=fp)\n else:\n network_spec = None\n logger.info(\"No network configuration provided.\")\n\n agent_configs = []\n if args.agent_config is not None:\n with open(args.agent_config, 'r') as fp:\n agent_config = json.load(fp=fp)\n else:\n raise TensorForceError(\"No agent configuration provided.\")\n\n for i in range(args.workers):\n worker_config = deepcopy(agent_config)\n\n # Optionally overwrite epsilon final values\n if \"explorations_spec\" in worker_config and worker_config['explorations_spec']['type'] == \"epsilon_anneal\":\n if args.epsilon_annealing:\n # epsilon final values are [0.5, 0.1, 0.01] with probabilities [0.3, 0.4, 0.3]\n epsilon_final = np.random.choice([0.5, 0.1, 0.01], p=[0.3, 0.4, 0.3])\n worker_config['explorations_spec'][\"epsilon_final\"] = epsilon_final\n\n agent_configs.append(worker_config)\n\n # Let the first agent create the model\n # Manually assign model\n logger.info(agent_configs[0])\n\n agent = Agent.from_spec(\n spec=agent_configs[0],\n kwargs=dict(\n states=environments[0].states,\n actions=environments[0].actions,\n network=network_spec\n )\n )\n\n agents = [agent]\n\n for i in xrange(args.workers - 1):\n config = agent_configs[i]\n agent_type = config.pop('type', None)\n worker = WorkerAgentGenerator(AgentsDictionary[agent_type])(\n states=environments[0].states,\n actions=environments[0].actions,\n network=network_spec,\n model=agent.model,\n **config\n )\n agents.append(worker)\n\n if args.load:\n load_dir = os.path.dirname(args.load)\n if not os.path.isdir(load_dir):\n raise OSError(\"Could not load agent from {}: No such directory.\".format(load_dir))\n agent.restore_model(args.load)\n\n if args.debug:\n logger.info(\"-\" * 16)\n logger.info(\"Configuration:\")\n logger.info(agent_configs[0])\n\n if args.save:\n save_dir = os.path.dirname(args.save)\n if not os.path.isdir(save_dir):\n try:\n os.mkdir(save_dir, 0o755)\n except OSError:\n raise OSError(\"Cannot save agent to dir {} ()\".format(save_dir))\n\n def episode_finished(stats):\n if args.debug:\n logger.info(\n \"Thread {t}. Finished episode {ep} after {ts} timesteps. 
Reward {r}\".\n format(t=stats['thread_id'], ep=stats['episode'], ts=stats['timestep'], r=stats['episode_reward'])\n )\n return True\n\n def summary_report(r):\n et = time.time()\n logger.info('=' * 40)\n logger.info('Current Step/Episode: {}/{}'.format(r.global_step, r.global_episode))\n logger.info('SPS: {}'.format(r.global_step / (et - r.start_time)))\n reward_list = r.episode_rewards\n if len(reward_list) > 0:\n logger.info('Max Reward: {}'.format(np.max(reward_list)))\n logger.info(\"Average of last 500 rewards: {}\".format(sum(reward_list[-500:]) / 500))\n logger.info(\"Average of last 100 rewards: {}\".format(sum(reward_list[-100:]) / 100))\n logger.info('=' * 40)\n\n # Create runners\n threaded_runner = ThreadedRunner(\n agents,\n environments,\n repeat_actions=1,\n save_path=args.save,\n save_episodes=args.save_episodes\n )\n\n logger.info(\"Starting {agent} for Environment '{env}'\".format(agent=agent, env=environments[0]))\n threaded_runner.run(summary_interval=100, episode_finished=episode_finished, summary_report=summary_report)\n threaded_runner.close()\n logger.info(\"Learning finished. Total episodes: {ep}\".format(ep=threaded_runner.global_episode))\n\n\nif __name__ == '__main__':\n main()\n" ]
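The tensorforce Queue/Latest memory code embedded above hinges on ring-buffer index arithmetic: slots are addressed modulo the capacity, and the n most recent timesteps are the n indices immediately preceding memory_index. A minimal stand-alone NumPy sketch of that indexing (hypothetical names, not the library's API) is:

import numpy as np

# Hypothetical fixed-capacity ring buffer; only the index arithmetic from the
# embedded Queue/Latest code is reproduced here.
capacity = 8
memory_index = 3                                   # next slot to be overwritten
reward_memory = np.arange(capacity, dtype=float)   # dummy stored rewards

def latest_indices(n):
    # Indices of the n most recently written slots, oldest first.
    return np.arange(memory_index - n, memory_index) % capacity

idx = latest_indices(5)
print(idx)                  # [6 7 0 1 2] -- the range wraps around the buffer
print(reward_memory[idx])   # rewards of the five most recent timesteps

Storing a new batch of k records in this scheme writes to np.arange(memory_index, memory_index + k) % capacity and then advances memory_index by k modulo the capacity, which mirrors the tf.scatter_update / tf.assign chain in the embedded tf_store method.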
[ [ "tensorflow.range", "tensorflow.reshape", "tensorflow.minimum", "tensorflow.logging.warn", "tensorflow.gather", "tensorflow.where", "tensorflow.logical_not" ], [ "tensorflow.scatter_update", "tensorflow.boolean_mask", "tensorflow.assign_add", "tensorflow.control_dependencies", "tensorflow.shape", "tensorflow.range", "tensorflow.minimum", "tensorflow.assign", "tensorflow.assert_less_equal", "tensorflow.gather", "tensorflow.no_op", "tensorflow.make_template", "tensorflow.assign_sub", "tensorflow.where" ], [ "numpy.array" ], [ "numpy.max", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jjhelmus/scikit-image
[ "b9b5fde0821fe8bcece2528b30d012c65c64ad6f", "b9b5fde0821fe8bcece2528b30d012c65c64ad6f", "b9b5fde0821fe8bcece2528b30d012c65c64ad6f", "b9b5fde0821fe8bcece2528b30d012c65c64ad6f", "b9b5fde0821fe8bcece2528b30d012c65c64ad6f" ]
[ "skimage/transform/radon_transform.py", "skimage/measure/tests/test_regionprops.py", "skimage/filters/tests/test_thresholding.py", "skimage/io/tests/test_pil.py", "skimage/transform/_geometric.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nradon.py - Radon and inverse radon transforms\n\nBased on code of Justin K. Romberg\n(http://www.clear.rice.edu/elec431/projects96/DSP/bpanalysis.html)\nJ. Gillam and Chris Griffin.\n\nReferences:\n -B.R. Ramesh, N. Srinivasa, K. Rajgopal, \"An Algorithm for Computing\n the Discrete Radon Transform With Some Applications\", Proceedings of\n the Fourth IEEE Region 10 International Conference, TENCON '89, 1989.\n -A. C. Kak, Malcolm Slaney, \"Principles of Computerized Tomographic\n Imaging\", IEEE Press 1988.\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nfrom scipy.fftpack import fft, ifft, fftfreq\nfrom scipy.interpolate import interp1d\nfrom ._warps_cy import _warp_fast\nfrom ._radon_transform import sart_projection_update\nfrom .. import util\nfrom warnings import warn\n\n\n__all__ = [\"radon\", \"iradon\", \"iradon_sart\"]\n\n\ndef radon(image, theta=None, circle=False):\n \"\"\"\n Calculates the radon transform of an image given specified\n projection angles.\n\n Parameters\n ----------\n image : array_like, dtype=float\n Input image. The rotation axis will be located in the pixel with\n indices ``(image.shape[0] // 2, image.shape[1] // 2)``.\n theta : array_like, dtype=float, optional (default np.arange(180))\n Projection angles (in degrees).\n circle : boolean, optional\n Assume image is zero outside the inscribed circle, making the\n width of each projection (the first dimension of the sinogram)\n equal to ``min(image.shape)``.\n\n Returns\n -------\n radon_image : ndarray\n Radon transform (sinogram). The tomography rotation axis will lie\n at the pixel index ``radon_image.shape[0] // 2`` along the 0th\n dimension of ``radon_image``.\n\n \"\"\"\n if image.ndim != 2:\n raise ValueError('The input image must be 2-D')\n if theta is None:\n theta = np.arange(180)\n\n if circle:\n radius = min(image.shape) // 2\n c0, c1 = np.ogrid[0:image.shape[0], 0:image.shape[1]]\n reconstruction_circle = ((c0 - image.shape[0] // 2) ** 2\n + (c1 - image.shape[1] // 2) ** 2)\n reconstruction_circle = reconstruction_circle <= radius ** 2\n if not np.all(reconstruction_circle | (image == 0)):\n warn('Radon transform: image must be zero outside the '\n 'reconstruction circle')\n # Crop image to make it square\n slices = []\n for d in (0, 1):\n if image.shape[d] > min(image.shape):\n excess = image.shape[d] - min(image.shape)\n slices.append(slice(int(np.ceil(excess / 2)),\n int(np.ceil(excess / 2)\n + min(image.shape))))\n else:\n slices.append(slice(None))\n slices = tuple(slices)\n padded_image = image[slices]\n else:\n diagonal = np.sqrt(2) * max(image.shape)\n pad = [int(np.ceil(diagonal - s)) for s in image.shape]\n new_center = [(s + p) // 2 for s, p in zip(image.shape, pad)]\n old_center = [s // 2 for s in image.shape]\n pad_before = [nc - oc for oc, nc in zip(old_center, new_center)]\n pad_width = [(pb, p - pb) for pb, p in zip(pad_before, pad)]\n padded_image = util.pad(image, pad_width, mode='constant',\n constant_values=0)\n # padded_image is always square\n assert padded_image.shape[0] == padded_image.shape[1]\n radon_image = np.zeros((padded_image.shape[0], len(theta)))\n center = padded_image.shape[0] // 2\n\n shift0 = np.array([[1, 0, -center],\n [0, 1, -center],\n [0, 0, 1]])\n shift1 = np.array([[1, 0, center],\n [0, 1, center],\n [0, 0, 1]])\n\n def build_rotation(theta):\n T = np.deg2rad(theta)\n R = np.array([[np.cos(T), np.sin(T), 0],\n [-np.sin(T), np.cos(T), 0],\n [0, 0, 1]])\n return shift1.dot(R).dot(shift0)\n\n for i in 
range(len(theta)):\n rotated = _warp_fast(padded_image, build_rotation(theta[i]))\n radon_image[:, i] = rotated.sum(0)\n return radon_image\n\n\ndef _sinogram_circle_to_square(sinogram):\n diagonal = int(np.ceil(np.sqrt(2) * sinogram.shape[0]))\n pad = diagonal - sinogram.shape[0]\n old_center = sinogram.shape[0] // 2\n new_center = diagonal // 2\n pad_before = new_center - old_center\n pad_width = ((pad_before, pad - pad_before), (0, 0))\n return util.pad(sinogram, pad_width, mode='constant', constant_values=0)\n\n\ndef iradon(radon_image, theta=None, output_size=None,\n filter=\"ramp\", interpolation=\"linear\", circle=False):\n \"\"\"\n Inverse radon transform.\n\n Reconstruct an image from the radon transform, using the filtered\n back projection algorithm.\n\n Parameters\n ----------\n radon_image : array_like, dtype=float\n Image containing radon transform (sinogram). Each column of\n the image corresponds to a projection along a different angle. The\n tomography rotation axis should lie at the pixel index\n ``radon_image.shape[0] // 2`` along the 0th dimension of\n ``radon_image``.\n theta : array_like, dtype=float, optional\n Reconstruction angles (in degrees). Default: m angles evenly spaced\n between 0 and 180 (if the shape of `radon_image` is (N, M)).\n output_size : int\n Number of rows and columns in the reconstruction.\n filter : str, optional (default ramp)\n Filter used in frequency domain filtering. Ramp filter used by default.\n Filters available: ramp, shepp-logan, cosine, hamming, hann.\n Assign None to use no filter.\n interpolation : str, optional (default 'linear')\n Interpolation method used in reconstruction. Methods available:\n 'linear', 'nearest', and 'cubic' ('cubic' is slow).\n circle : boolean, optional\n Assume the reconstructed image is zero outside the inscribed circle.\n Also changes the default output_size to match the behaviour of\n ``radon`` called with ``circle=True``.\n\n Returns\n -------\n reconstructed : ndarray\n Reconstructed image. The rotation axis will be located in the pixel\n with indices\n ``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.\n\n Notes\n -----\n It applies the Fourier slice theorem to reconstruct an image by\n multiplying the frequency domain of the filter with the FFT of the\n projection data. 
This algorithm is called filtered back projection.\n\n \"\"\"\n if radon_image.ndim != 2:\n raise ValueError('The input image must be 2-D')\n if theta is None:\n m, n = radon_image.shape\n theta = np.linspace(0, 180, n, endpoint=False)\n else:\n theta = np.asarray(theta)\n if len(theta) != radon_image.shape[1]:\n raise ValueError(\"The given ``theta`` does not match the number of \"\n \"projections in ``radon_image``.\")\n interpolation_types = ('linear', 'nearest', 'cubic')\n if not interpolation in interpolation_types:\n raise ValueError(\"Unknown interpolation: %s\" % interpolation)\n if not output_size:\n # If output size not specified, estimate from input radon image\n if circle:\n output_size = radon_image.shape[0]\n else:\n output_size = int(np.floor(np.sqrt((radon_image.shape[0]) ** 2\n / 2.0)))\n if circle:\n radon_image = _sinogram_circle_to_square(radon_image)\n\n th = (np.pi / 180.0) * theta\n # resize image to next power of two (but no less than 64) for\n # Fourier analysis; speeds up Fourier and lessens artifacts\n projection_size_padded = \\\n max(64, int(2 ** np.ceil(np.log2(2 * radon_image.shape[0]))))\n pad_width = ((0, projection_size_padded - radon_image.shape[0]), (0, 0))\n img = util.pad(radon_image, pad_width, mode='constant', constant_values=0)\n\n # Construct the Fourier filter\n f = fftfreq(projection_size_padded).reshape(-1, 1) # digital frequency\n omega = 2 * np.pi * f # angular frequency\n fourier_filter = 2 * np.abs(f) # ramp filter\n if filter == \"ramp\":\n pass\n elif filter == \"shepp-logan\":\n # Start from first element to avoid divide by zero\n fourier_filter[1:] = fourier_filter[1:] * np.sin(omega[1:]) / omega[1:]\n elif filter == \"cosine\":\n fourier_filter *= np.cos(omega)\n elif filter == \"hamming\":\n fourier_filter *= (0.54 + 0.46 * np.cos(omega / 2))\n elif filter == \"hann\":\n fourier_filter *= (1 + np.cos(omega / 2)) / 2\n elif filter is None:\n fourier_filter[:] = 1\n else:\n raise ValueError(\"Unknown filter: %s\" % filter)\n # Apply filter in Fourier domain\n projection = fft(img, axis=0) * fourier_filter\n radon_filtered = np.real(ifft(projection, axis=0))\n\n # Resize filtered image back to original size\n radon_filtered = radon_filtered[:radon_image.shape[0], :]\n reconstructed = np.zeros((output_size, output_size))\n # Determine the center of the projections (= center of sinogram)\n mid_index = radon_image.shape[0] // 2\n\n [X, Y] = np.mgrid[0:output_size, 0:output_size]\n xpr = X - int(output_size) // 2\n ypr = Y - int(output_size) // 2\n\n # Reconstruct image by interpolation\n for i in range(len(theta)):\n t = ypr * np.cos(th[i]) - xpr * np.sin(th[i])\n x = np.arange(radon_filtered.shape[0]) - mid_index\n if interpolation == 'linear':\n backprojected = np.interp(t, x, radon_filtered[:, i],\n left=0, right=0)\n else:\n interpolant = interp1d(x, radon_filtered[:, i], kind=interpolation,\n bounds_error=False, fill_value=0)\n backprojected = interpolant(t)\n reconstructed += backprojected\n if circle:\n radius = output_size // 2\n reconstruction_circle = (xpr ** 2 + ypr ** 2) <= radius ** 2\n reconstructed[~reconstruction_circle] = 0.\n\n return reconstructed * np.pi / (2 * len(th))\n\n\ndef order_angles_golden_ratio(theta):\n \"\"\"\n Order angles to reduce the amount of correlated information\n in subsequent projections.\n\n Parameters\n ----------\n theta : 1D array of floats\n Projection angles in degrees. 
Duplicate angles are not allowed.\n\n Returns\n -------\n indices_generator : generator yielding unsigned integers\n The returned generator yields indices into ``theta`` such that\n ``theta[indices]`` gives the approximate golden ratio ordering\n of the projections. In total, ``len(theta)`` indices are yielded.\n All non-negative integers < ``len(theta)`` are yielded exactly once.\n\n Notes\n -----\n The method used here is that of the golden ratio introduced\n by T. Kohler.\n\n References\n ----------\n .. [1] Kohler, T. \"A projection access scheme for iterative\n reconstruction based on the golden section.\" Nuclear Science\n Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.\n .. [2] Winkelmann, Stefanie, et al. \"An optimal radial profile order\n based on the Golden Ratio for time-resolved MRI.\"\n Medical Imaging, IEEE Transactions on 26.1 (2007): 68-76.\n \"\"\"\n interval = 180\n\n def angle_distance(a, b):\n difference = a - b\n return min(abs(difference % interval), abs(difference % -interval))\n\n remaining = list(np.argsort(theta)) # indices into theta\n # yield an arbitrary angle to start things off\n index = remaining.pop(0)\n angle = theta[index]\n yield index\n # determine subsequent angles using the golden ratio method\n angle_increment = interval * (1 - (np.sqrt(5) - 1) / 2)\n while remaining:\n angle = (angle + angle_increment) % interval\n insert_point = np.searchsorted(theta[remaining], angle)\n index_below = insert_point - 1\n index_above = 0 if insert_point == len(remaining) else insert_point\n distance_below = angle_distance(angle, theta[remaining[index_below]])\n distance_above = angle_distance(angle, theta[remaining[index_above]])\n if distance_below < distance_above:\n yield remaining.pop(index_below)\n else:\n yield remaining.pop(index_above)\n\n\ndef iradon_sart(radon_image, theta=None, image=None, projection_shifts=None,\n clip=None, relaxation=0.15):\n \"\"\"\n Inverse radon transform\n\n Reconstruct an image from the radon transform, using a single iteration of\n the Simultaneous Algebraic Reconstruction Technique (SART) algorithm.\n\n Parameters\n ----------\n radon_image : 2D array, dtype=float\n Image containing radon transform (sinogram). Each column of\n the image corresponds to a projection along a different angle. The\n tomography rotation axis should lie at the pixel index\n ``radon_image.shape[0] // 2`` along the 0th dimension of\n ``radon_image``.\n theta : 1D array, dtype=float, optional\n Reconstruction angles (in degrees). Default: m angles evenly spaced\n between 0 and 180 (if the shape of `radon_image` is (N, M)).\n image : 2D array, dtype=float, optional\n Image containing an initial reconstruction estimate. Shape of this\n array should be ``(radon_image.shape[0], radon_image.shape[0])``. The\n default is an array of zeros.\n projection_shifts : 1D array, dtype=float\n Shift the projections contained in ``radon_image`` (the sinogram) by\n this many pixels before reconstructing the image. The i'th value\n defines the shift of the i'th column of ``radon_image``.\n clip : length-2 sequence of floats\n Force all values in the reconstructed tomogram to lie in the range\n ``[clip[0], clip[1]]``\n relaxation : float\n Relaxation parameter for the update step. A higher value can\n improve the convergence rate, but one runs the risk of instabilities.\n Values close to or higher than 1 are not recommended.\n\n Returns\n -------\n reconstructed : ndarray\n Reconstructed image. 
The rotation axis will be located in the pixel\n with indices\n ``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.\n\n Notes\n -----\n Algebraic Reconstruction Techniques are based on formulating the tomography\n reconstruction problem as a set of linear equations. Along each ray,\n the projected value is the sum of all the values of the cross section along\n the ray. A typical feature of SART (and a few other variants of algebraic\n techniques) is that it samples the cross section at equidistant points\n along the ray, using linear interpolation between the pixel values of the\n cross section. The resulting set of linear equations are then solved using\n a slightly modified Kaczmarz method.\n\n When using SART, a single iteration is usually sufficient to obtain a good\n reconstruction. Further iterations will tend to enhance high-frequency\n information, but will also often increase the noise.\n\n References\n ----------\n .. [1] AC Kak, M Slaney, \"Principles of Computerized Tomographic\n Imaging\", IEEE Press 1988.\n .. [2] AH Andersen, AC Kak, \"Simultaneous algebraic reconstruction\n technique (SART): a superior implementation of the ART algorithm\",\n Ultrasonic Imaging 6 pp 81--94 (1984)\n .. [3] S Kaczmarz, \"Angenäherte auflösung von systemen linearer\n gleichungen\", Bulletin International de l’Academie Polonaise des\n Sciences et des Lettres 35 pp 355--357 (1937)\n .. [4] Kohler, T. \"A projection access scheme for iterative\n reconstruction based on the golden section.\" Nuclear Science\n Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.\n .. [5] Kaczmarz' method, Wikipedia,\n http://en.wikipedia.org/wiki/Kaczmarz_method\n \"\"\"\n if radon_image.ndim != 2:\n raise ValueError('radon_image must be two dimensional')\n reconstructed_shape = (radon_image.shape[0], radon_image.shape[0])\n if theta is None:\n theta = np.linspace(0, 180, radon_image.shape[1], endpoint=False)\n elif theta.shape != (radon_image.shape[1],):\n raise ValueError('Shape of theta (%s) does not match the '\n 'number of projections (%d)'\n % (projection_shifts.shape, radon_image.shape[1]))\n if image is None:\n image = np.zeros(reconstructed_shape, dtype=np.float)\n elif image.shape != reconstructed_shape:\n raise ValueError('Shape of image (%s) does not match first dimension '\n 'of radon_image (%s)'\n % (image.shape, reconstructed_shape))\n if projection_shifts is None:\n projection_shifts = np.zeros((radon_image.shape[1],), dtype=np.float)\n elif projection_shifts.shape != (radon_image.shape[1],):\n raise ValueError('Shape of projection_shifts (%s) does not match the '\n 'number of projections (%d)'\n % (projection_shifts.shape, radon_image.shape[1]))\n if not clip is None:\n if len(clip) != 2:\n raise ValueError('clip must be a length-2 sequence')\n clip = (float(clip[0]), float(clip[1]))\n relaxation = float(relaxation)\n\n for angle_index in order_angles_golden_ratio(theta):\n image_update = sart_projection_update(image, theta[angle_index],\n radon_image[:, angle_index],\n projection_shifts[angle_index])\n image += relaxation * image_update\n if not clip is None:\n image = np.clip(image, clip[0], clip[1])\n return image\n", "from numpy.testing import assert_array_equal, assert_almost_equal, \\\n assert_array_almost_equal, assert_raises, assert_equal\nimport numpy as np\nimport math\n\nfrom skimage.measure._regionprops import (regionprops, PROPS, perimeter,\n _parse_docs)\n\n\nSAMPLE = np.array(\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 
1, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1],\n [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]\n)\nINTENSITY_SAMPLE = SAMPLE.copy()\nINTENSITY_SAMPLE[1, 9:11] = 2\n\nSAMPLE_3D = np.zeros((6, 6, 6), dtype=np.uint8)\nSAMPLE_3D[1:3, 1:3, 1:3] = 1\nSAMPLE_3D[3, 2, 2] = 1 \nINTENSITY_SAMPLE_3D = SAMPLE_3D.copy()\n\ndef test_all_props():\n region = regionprops(SAMPLE, INTENSITY_SAMPLE)[0]\n for prop in PROPS:\n assert_almost_equal(region[prop], getattr(region, PROPS[prop]))\n\n\ndef test_all_props_3d():\n region = regionprops(SAMPLE_3D, INTENSITY_SAMPLE_3D)[0]\n for prop in PROPS:\n try:\n assert_almost_equal(region[prop], getattr(region, PROPS[prop]))\n except NotImplementedError:\n pass\n\ndef test_dtype():\n regionprops(np.zeros((10, 10), dtype=np.int))\n regionprops(np.zeros((10, 10), dtype=np.uint))\n assert_raises((TypeError), regionprops,\n np.zeros((10, 10), dtype=np.float))\n assert_raises((TypeError), regionprops,\n np.zeros((10, 10), dtype=np.double))\n\n\ndef test_ndim():\n regionprops(np.zeros((10, 10), dtype=np.int))\n regionprops(np.zeros((10, 10, 1), dtype=np.int))\n regionprops(np.zeros((10, 10, 1, 1), dtype=np.int))\n regionprops(np.zeros((10, 10, 10), dtype=np.int))\n assert_raises(TypeError, regionprops, np.zeros((10, 10, 10, 2), dtype=np.int))\n\n\ndef test_area():\n area = regionprops(SAMPLE)[0].area\n assert area == np.sum(SAMPLE)\n area = regionprops(SAMPLE_3D)[0].area\n assert area == np.sum(SAMPLE_3D)\n\n\ndef test_bbox():\n bbox = regionprops(SAMPLE)[0].bbox\n assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]))\n\n SAMPLE_mod = SAMPLE.copy()\n SAMPLE_mod[:, -1] = 0\n bbox = regionprops(SAMPLE_mod)[0].bbox\n assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]-1))\n\n bbox = regionprops(SAMPLE_3D)[0].bbox\n assert_array_almost_equal(bbox, (1, 1, 1, 4, 3, 3))\n\n\ndef test_moments_central():\n mu = regionprops(SAMPLE)[0].moments_central\n # determined with OpenCV\n assert_almost_equal(mu[0,2], 436.00000000000045)\n # different from OpenCV results, bug in OpenCV\n assert_almost_equal(mu[0,3], -737.333333333333)\n assert_almost_equal(mu[1,1], -87.33333333333303)\n assert_almost_equal(mu[1,2], -127.5555555555593)\n assert_almost_equal(mu[2,0], 1259.7777777777774)\n assert_almost_equal(mu[2,1], 2000.296296296291)\n assert_almost_equal(mu[3,0], -760.0246913580195)\n\n\ndef test_centroid():\n centroid = regionprops(SAMPLE)[0].centroid\n # determined with MATLAB\n assert_array_almost_equal(centroid, (5.66666666666666, 9.444444444444444))\n\n\ndef test_convex_area():\n area = regionprops(SAMPLE)[0].convex_area\n # determined with MATLAB\n assert area == 124\n\n\ndef test_convex_image():\n img = regionprops(SAMPLE)[0].convex_image\n # determined with MATLAB\n ref = np.array(\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\n )\n assert_array_equal(img, ref)\n\n\ndef test_coordinates():\n sample = np.zeros((10, 10), dtype=np.int8)\n coords = np.array([[3, 2], [3, 3], [3, 4]])\n sample[coords[:, 0], coords[:, 1]] = 1\n prop_coords = regionprops(sample)[0].coords\n assert_array_equal(prop_coords, coords)\n\n sample = np.zeros((6, 6, 6), dtype=np.int8)\n coords = np.array([[1, 1, 1], [1, 2, 1], [1, 3, 1]])\n sample[coords[:, 0], coords[:, 1], coords[:, 2]] = 1\n prop_coords = regionprops(sample)[0].coords\n assert_array_equal(prop_coords, coords)\n\ndef test_eccentricity():\n eps = regionprops(SAMPLE)[0].eccentricity\n assert_almost_equal(eps, 0.814629313427)\n\n img = np.zeros((5, 5), dtype=np.int)\n img[2, 2] = 1\n eps = regionprops(img)[0].eccentricity\n assert_almost_equal(eps, 0)\n\n\ndef test_equiv_diameter():\n diameter = regionprops(SAMPLE)[0].equivalent_diameter\n # determined with MATLAB\n assert_almost_equal(diameter, 9.57461472963)\n\n\ndef test_euler_number():\n en = regionprops(SAMPLE)[0].euler_number\n assert en == 1\n\n SAMPLE_mod = SAMPLE.copy()\n SAMPLE_mod[7, -3] = 0\n en = regionprops(SAMPLE_mod)[0].euler_number\n assert en == 0\n\n\ndef test_extent():\n extent = regionprops(SAMPLE)[0].extent\n assert_almost_equal(extent, 0.4)\n\n\ndef test_moments_hu():\n hu = regionprops(SAMPLE)[0].moments_hu\n ref = np.array([\n 3.27117627e-01,\n 2.63869194e-02,\n 2.35390060e-02,\n 1.23151193e-03,\n 1.38882330e-06,\n -2.72586158e-05,\n 6.48350653e-06\n ])\n # bug in OpenCV caused in Central Moments calculation?\n assert_array_almost_equal(hu, ref)\n\n\ndef test_image():\n img = regionprops(SAMPLE)[0].image\n assert_array_equal(img, SAMPLE)\n\n img = regionprops(SAMPLE_3D)[0].image\n assert_array_equal(img, SAMPLE_3D[1:4, 1:3, 1:3])\n\n\ndef test_label():\n label = regionprops(SAMPLE)[0].label\n assert_array_equal(label, 1)\n\n label = regionprops(SAMPLE_3D)[0].label\n assert_array_equal(label, 1)\n\n\ndef test_filled_area():\n area = regionprops(SAMPLE)[0].filled_area\n assert area == np.sum(SAMPLE)\n\n SAMPLE_mod = SAMPLE.copy()\n SAMPLE_mod[7, -3] = 0\n area = regionprops(SAMPLE_mod)[0].filled_area\n assert area == np.sum(SAMPLE)\n\n\ndef test_filled_image():\n img = regionprops(SAMPLE)[0].filled_image\n assert_array_equal(img, SAMPLE)\n\n\ndef test_major_axis_length():\n length = regionprops(SAMPLE)[0].major_axis_length\n # MATLAB has different interpretation of ellipse than found in literature,\n # here implemented as found in literature\n assert_almost_equal(length, 16.7924234999)\n\n\ndef test_max_intensity():\n intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].max_intensity\n assert_almost_equal(intensity, 2)\n\n\ndef test_mean_intensity():\n intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].mean_intensity\n assert_almost_equal(intensity, 1.02777777777777)\n\n\ndef test_min_intensity():\n intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].min_intensity\n assert_almost_equal(intensity, 1)\n\n\ndef test_minor_axis_length():\n length = regionprops(SAMPLE)[0].minor_axis_length\n # MATLAB has different interpretation of ellipse than found in literature,\n # here implemented as found in literature\n assert_almost_equal(length, 9.739302807263)\n\n\ndef test_moments():\n m = regionprops(SAMPLE)[0].moments\n # determined with OpenCV\n 
assert_almost_equal(m[0,0], 72.0)\n assert_almost_equal(m[0,1], 408.0)\n assert_almost_equal(m[0,2], 2748.0)\n assert_almost_equal(m[0,3], 19776.0)\n assert_almost_equal(m[1,0], 680.0)\n assert_almost_equal(m[1,1], 3766.0)\n assert_almost_equal(m[1,2], 24836.0)\n assert_almost_equal(m[2,0], 7682.0)\n assert_almost_equal(m[2,1], 43882.0)\n assert_almost_equal(m[3,0], 95588.0)\n\n\ndef test_moments_normalized():\n nu = regionprops(SAMPLE)[0].moments_normalized\n # determined with OpenCV\n assert_almost_equal(nu[0,2], 0.08410493827160502)\n assert_almost_equal(nu[1,1], -0.016846707818929982)\n assert_almost_equal(nu[1,2], -0.002899800614433943)\n assert_almost_equal(nu[2,0], 0.24301268861454037)\n assert_almost_equal(nu[2,1], 0.045473992910668816)\n assert_almost_equal(nu[3,0], -0.017278118992041805)\n\n\ndef test_orientation():\n orientation = regionprops(SAMPLE)[0].orientation\n # determined with MATLAB\n assert_almost_equal(orientation, 0.10446844651921)\n # test correct quadrant determination\n orientation2 = regionprops(SAMPLE.T)[0].orientation\n assert_almost_equal(orientation2, math.pi / 2 - orientation)\n # test diagonal regions\n diag = np.eye(10, dtype=int)\n orientation_diag = regionprops(diag)[0].orientation\n assert_almost_equal(orientation_diag, -math.pi / 4)\n orientation_diag = regionprops(np.flipud(diag))[0].orientation\n assert_almost_equal(orientation_diag, math.pi / 4)\n orientation_diag = regionprops(np.fliplr(diag))[0].orientation\n assert_almost_equal(orientation_diag, math.pi / 4)\n orientation_diag = regionprops(np.fliplr(np.flipud(diag)))[0].orientation\n assert_almost_equal(orientation_diag, -math.pi / 4)\n\n\ndef test_perimeter():\n per = regionprops(SAMPLE)[0].perimeter\n assert_almost_equal(per, 55.2487373415)\n\n per = perimeter(SAMPLE.astype('double'), neighbourhood=8)\n assert_almost_equal(per, 46.8284271247)\n\n\ndef test_solidity():\n solidity = regionprops(SAMPLE)[0].solidity\n # determined with MATLAB\n assert_almost_equal(solidity, 0.580645161290323)\n\n\ndef test_weighted_moments_central():\n wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_moments_central\n ref = np.array(\n [[ 7.4000000000e+01, -2.1316282073e-13, 4.7837837838e+02,\n -7.5943608473e+02],\n [ 3.7303493627e-14, -8.7837837838e+01, -1.4801314828e+02,\n -1.2714707125e+03],\n [ 1.2602837838e+03, 2.1571526662e+03, 6.6989799420e+03,\n 1.5304076361e+04],\n [ -7.6561796932e+02, -4.2385971907e+03, -9.9501164076e+03,\n -3.3156729271e+04]]\n )\n np.set_printoptions(precision=10)\n assert_array_almost_equal(wmu, ref)\n\n\ndef test_weighted_centroid():\n centroid = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_centroid\n assert_array_almost_equal(centroid, (5.540540540540, 9.445945945945))\n\n\ndef test_weighted_moments_hu():\n whu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_moments_hu\n ref = np.array([\n 3.1750587329e-01,\n 2.1417517159e-02,\n 2.3609322038e-02,\n 1.2565683360e-03,\n 8.3014209421e-07,\n -3.5073773473e-05,\n 6.7936409056e-06\n ])\n assert_array_almost_equal(whu, ref)\n\n\ndef test_weighted_moments():\n wm = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_moments\n ref = np.array(\n [[ 7.4000000000e+01, 4.1000000000e+02, 2.7500000000e+03,\n 1.9778000000e+04],\n [ 6.9900000000e+02, 3.7850000000e+03, 2.4855000000e+04,\n 1.7500100000e+05],\n [ 7.8630000000e+03, 4.4063000000e+04, 2.9347700000e+05,\n 2.0810510000e+06],\n [ 9.7317000000e+04, 5.7256700000e+05, 3.9007170000e+06,\n 
2.8078871000e+07]]\n )\n assert_array_almost_equal(wm, ref)\n\n\ndef test_weighted_moments_normalized():\n wnu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_moments_normalized\n ref = np.array(\n [[ np.nan, np.nan, 0.0873590903, -0.0161217406],\n [ np.nan, -0.0160405109, -0.0031421072, -0.0031376984],\n [ 0.230146783, 0.0457932622, 0.0165315478, 0.0043903193],\n [-0.0162529732, -0.0104598869, -0.0028544152, -0.0011057191]]\n )\n assert_array_almost_equal(wnu, ref)\n\n\ndef test_label_sequence():\n a = np.empty((2, 2), dtype=np.int)\n a[:, :] = 2\n ps = regionprops(a)\n assert len(ps) == 1\n assert ps[0].label == 2\n\n\ndef test_pure_background():\n a = np.zeros((2, 2), dtype=np.int)\n ps = regionprops(a)\n assert len(ps) == 0\n\n\ndef test_invalid():\n ps = regionprops(SAMPLE)\n\n def get_intensity_image():\n ps[0].intensity_image\n\n assert_raises(AttributeError, get_intensity_image)\n\n\ndef test_invalid_size():\n wrong_intensity_sample = np.array([[1], [1]])\n assert_raises(ValueError, regionprops, SAMPLE, wrong_intensity_sample)\n\n\ndef test_equals():\n arr = np.zeros((100, 100), dtype=np.int)\n arr[0:25, 0:25] = 1\n arr[50:99, 50:99] = 2\n\n regions = regionprops(arr)\n r1 = regions[0]\n\n regions = regionprops(arr)\n r2 = regions[0]\n r3 = regions[1]\n\n assert_equal(r1 == r2, True, \"Same regionprops are not equal\")\n assert_equal(r1 != r3, True, \"Different regionprops are equal\")\n\n\ndef test_iterate_all_props():\n region = regionprops(SAMPLE)[0]\n p0 = dict((p, region[p]) for p in region)\n\n region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[0]\n p1 = dict((p, region[p]) for p in region)\n\n assert len(p0) < len(p1)\n\n\ndef test_cache():\n region = regionprops(SAMPLE)[0]\n f0 = region.filled_image\n region._label_image[:10] = 1\n f1 = region.filled_image\n\n # Changed underlying image, but cache keeps result the same\n assert_array_equal(f0, f1)\n\n # Now invalidate cache\n region._cache_active = False\n f1 = region.filled_image\n\n assert np.any(f0 != f1)\n\n\ndef test_docstrings_and_props():\n region = regionprops(SAMPLE)[0]\n\n docs = _parse_docs()\n props = [m for m in dir(region) if not m.startswith('_')]\n\n nr_docs_parsed = len(docs)\n nr_props = len(props)\n assert_equal(nr_docs_parsed, nr_props)\n\n ds = docs['weighted_moments_normalized']\n assert 'iteration' not in ds\n assert len(ds.split('\\n')) > 3\n\n\nif __name__ == \"__main__\":\n from numpy.testing import run_module_suite\n run_module_suite()\n", "import numpy as np\nfrom numpy.testing import (assert_equal,\n assert_almost_equal,\n assert_raises)\n\nimport skimage\nfrom skimage import data\nfrom skimage._shared._warnings import expected_warnings\nfrom skimage.filters.thresholding import (threshold_adaptive,\n threshold_otsu,\n threshold_li,\n threshold_yen,\n threshold_isodata)\n\n\nclass TestSimpleImage():\n def setup(self):\n self.image = np.array([[0, 0, 1, 3, 5],\n [0, 1, 4, 3, 4],\n [1, 2, 5, 4, 1],\n [2, 4, 5, 2, 1],\n [4, 5, 1, 0, 0]], dtype=int)\n\n def test_otsu(self):\n assert threshold_otsu(self.image) == 2\n\n def test_otsu_negative_int(self):\n image = self.image - 2\n assert threshold_otsu(image) == 0\n\n def test_otsu_float_image(self):\n image = np.float64(self.image)\n assert 2 <= threshold_otsu(image) < 3\n\n def test_li(self):\n assert int(threshold_li(self.image)) == 2\n\n def test_li_negative_int(self):\n image = self.image - 2\n assert int(threshold_li(image)) == 0\n\n def test_li_float_image(self):\n image = np.float64(self.image)\n assert 2 <= 
threshold_li(image) < 3\n\n def test_yen(self):\n assert threshold_yen(self.image) == 2\n\n def test_yen_negative_int(self):\n image = self.image - 2\n assert threshold_yen(image) == 0\n\n def test_yen_float_image(self):\n image = np.float64(self.image)\n assert 2 <= threshold_yen(image) < 3\n\n def test_yen_arange(self):\n image = np.arange(256)\n assert threshold_yen(image) == 127\n\n def test_yen_binary(self):\n image = np.zeros([2,256], dtype=np.uint8)\n image[0] = 255\n assert threshold_yen(image) < 1\n\n def test_yen_blank_zero(self):\n image = np.zeros((5, 5), dtype=np.uint8)\n assert threshold_yen(image) == 0\n\n def test_yen_blank_max(self):\n image = np.empty((5, 5), dtype=np.uint8)\n image.fill(255)\n assert threshold_yen(image) == 255\n\n def test_isodata(self):\n assert threshold_isodata(self.image) == 2\n assert threshold_isodata(self.image, return_all=True) == [2]\n\n def test_isodata_blank_zero(self):\n image = np.zeros((5, 5), dtype=np.uint8)\n assert threshold_isodata(image) == 0\n assert threshold_isodata(image, return_all=True) == [0]\n\n def test_isodata_linspace(self):\n image = np.linspace(-127, 0, 256)\n assert -63.8 < threshold_isodata(image) < -63.6\n assert_almost_equal(threshold_isodata(image, return_all=True),\n [-63.74804688, -63.25195312])\n\n def test_isodata_16bit(self):\n np.random.seed(0)\n imfloat = np.random.rand(256, 256)\n assert 0.49 < threshold_isodata(imfloat, nbins=1024) < 0.51\n assert all(0.49 < threshold_isodata(imfloat, nbins=1024,\n return_all=True))\n\n def test_threshold_adaptive_generic(self):\n def func(arr):\n return arr.sum() / arr.shape[0]\n ref = np.array(\n [[False, False, False, False, True],\n [False, False, True, False, True],\n [False, False, True, True, False],\n [False, True, True, False, False],\n [ True, True, False, False, False]]\n )\n out = threshold_adaptive(self.image, 3, method='generic', param=func)\n assert_equal(ref, out)\n\n def test_threshold_adaptive_gaussian(self):\n ref = np.array(\n [[False, False, False, False, True],\n [False, False, True, False, True],\n [False, False, True, True, False],\n [False, True, True, False, False],\n [ True, True, False, False, False]]\n )\n out = threshold_adaptive(self.image, 3, method='gaussian')\n assert_equal(ref, out)\n\n out = threshold_adaptive(self.image, 3, method='gaussian', param=1.0 / 3.0)\n assert_equal(ref, out)\n\n def test_threshold_adaptive_mean(self):\n ref = np.array(\n [[False, False, False, False, True],\n [False, False, True, False, True],\n [False, False, True, True, False],\n [False, True, True, False, False],\n [ True, True, False, False, False]]\n )\n out = threshold_adaptive(self.image, 3, method='mean')\n assert_equal(ref, out)\n\n def test_threshold_adaptive_median(self):\n ref = np.array(\n [[False, False, False, False, True],\n [False, False, True, False, False],\n [False, False, True, False, False],\n [False, False, True, True, False],\n [False, True, False, False, False]]\n )\n out = threshold_adaptive(self.image, 3, method='median')\n assert_equal(ref, out)\n\n\ndef test_otsu_camera_image():\n camera = skimage.img_as_ubyte(data.camera())\n assert 86 < threshold_otsu(camera) < 88\n\n\ndef test_otsu_coins_image():\n coins = skimage.img_as_ubyte(data.coins())\n assert 106 < threshold_otsu(coins) < 108\n\n\ndef test_otsu_coins_image_as_float():\n coins = skimage.img_as_float(data.coins())\n assert 0.41 < threshold_otsu(coins) < 0.42\n\n\ndef test_otsu_astro_image():\n img = skimage.img_as_ubyte(data.astronaut())\n with 
expected_warnings(['grayscale']):\n assert 109 < threshold_otsu(img) < 111\n\n\ndef test_otsu_one_color_image():\n img = np.ones((10, 10), dtype=np.uint8)\n assert_raises(TypeError, threshold_otsu, img)\n\ndef test_li_camera_image():\n camera = skimage.img_as_ubyte(data.camera())\n assert 63 < threshold_li(camera) < 65\n\n\ndef test_li_coins_image():\n coins = skimage.img_as_ubyte(data.coins())\n assert 95 < threshold_li(coins) < 97\n\n\ndef test_li_coins_image_as_float():\n coins = skimage.img_as_float(data.coins())\n assert 0.37 < threshold_li(coins) < 0.38\n\n\ndef test_li_astro_image():\n img = skimage.img_as_ubyte(data.astronaut())\n assert 66 < threshold_li(img) < 68\n\ndef test_yen_camera_image():\n camera = skimage.img_as_ubyte(data.camera())\n assert 197 < threshold_yen(camera) < 199\n\n\ndef test_yen_coins_image():\n coins = skimage.img_as_ubyte(data.coins())\n assert 109 < threshold_yen(coins) < 111\n\n\ndef test_yen_coins_image_as_float():\n coins = skimage.img_as_float(data.coins())\n assert 0.43 < threshold_yen(coins) < 0.44\n\n\ndef test_adaptive_even_block_size_error():\n img = data.camera()\n assert_raises(ValueError, threshold_adaptive, img, block_size=4)\n\n\ndef test_isodata_camera_image():\n camera = skimage.img_as_ubyte(data.camera())\n\n threshold = threshold_isodata(camera)\n assert np.floor((camera[camera <= threshold].mean() +\n camera[camera > threshold].mean()) / 2.0) == threshold\n assert threshold == 87\n\n assert threshold_isodata(camera, return_all=True) == [87]\n\n\ndef test_isodata_coins_image():\n coins = skimage.img_as_ubyte(data.coins())\n\n threshold = threshold_isodata(coins)\n assert np.floor((coins[coins <= threshold].mean() +\n coins[coins > threshold].mean()) / 2.0) == threshold\n assert threshold == 107\n\n assert threshold_isodata(coins, return_all=True) == [107]\n\n\ndef test_isodata_moon_image():\n moon = skimage.img_as_ubyte(data.moon())\n\n threshold = threshold_isodata(moon)\n assert np.floor((moon[moon <= threshold].mean() +\n moon[moon > threshold].mean()) / 2.0) == threshold\n assert threshold == 86\n\n thresholds = threshold_isodata(moon, return_all=True)\n for threshold in thresholds:\n assert np.floor((moon[moon <= threshold].mean() +\n moon[moon > threshold].mean()) / 2.0) == threshold\n assert_equal(thresholds, [86, 87, 88, 122, 123, 124, 139, 140])\n\n\ndef test_isodata_moon_image_negative_int():\n moon = skimage.img_as_ubyte(data.moon()).astype(np.int32)\n moon -= 100\n\n threshold = threshold_isodata(moon)\n assert np.floor((moon[moon <= threshold].mean() +\n moon[moon > threshold].mean()) / 2.0) == threshold\n assert threshold == -14\n\n thresholds = threshold_isodata(moon, return_all=True)\n for threshold in thresholds:\n assert np.floor((moon[moon <= threshold].mean() +\n moon[moon > threshold].mean()) / 2.0) == threshold\n assert_equal(thresholds, [-14, -13, -12, 22, 23, 24, 39, 40])\n\n\ndef test_isodata_moon_image_negative_float():\n moon = skimage.img_as_ubyte(data.moon()).astype(np.float64)\n moon -= 100\n\n assert -14 < threshold_isodata(moon) < -13\n\n thresholds = threshold_isodata(moon, return_all=True)\n assert_almost_equal(thresholds,\n [-13.83789062, -12.84179688, -11.84570312, 22.02148438,\n 23.01757812, 24.01367188, 38.95507812, 39.95117188])\n\n\nif __name__ == '__main__':\n np.testing.run_module_suite()\n", "import os.path\nimport numpy as np\nfrom numpy.testing import (\n assert_array_equal, assert_array_almost_equal, assert_raises,\n assert_allclose, run_module_suite)\n\nfrom tempfile import 
NamedTemporaryFile\n\nfrom ... import data_dir, img_as_float\nfrom .. import imread, imsave, use_plugin, reset_plugins\nfrom ..._shared.testing import mono_check, color_check\nfrom ..._shared._warnings import expected_warnings\nfrom ..._shared._tempfile import temporary_file\n\nfrom six import BytesIO\n\nfrom PIL import Image\nfrom .._plugins.pil_plugin import (\n pil_to_ndarray, ndarray_to_pil, _palette_is_grayscale)\nfrom ...measure import compare_ssim as ssim\nfrom ...color import rgb2lab\n\n\ndef setup():\n use_plugin('pil')\n\n\ndef teardown():\n reset_plugins()\n\n\ndef setup_module(self):\n \"\"\"The effect of the `plugin.use` call may be overridden by later imports.\n Call `use_plugin` directly before the tests to ensure that PIL is used.\n\n \"\"\"\n try:\n use_plugin('pil')\n except ImportError:\n pass\n\ndef test_png_round_trip():\n f = NamedTemporaryFile(suffix='.png')\n fname = f.name\n f.close()\n I = np.eye(3)\n imsave(fname, I)\n Ip = img_as_float(imread(fname))\n os.remove(fname)\n assert np.sum(np.abs(Ip-I)) < 1e-3\n\ndef test_imread_flatten():\n # a color image is flattened\n img = imread(os.path.join(data_dir, 'color.png'), flatten=True)\n assert img.ndim == 2\n assert img.dtype == np.float64\n img = imread(os.path.join(data_dir, 'camera.png'), flatten=True)\n # check that flattening does not occur for an image that is grey already.\n assert np.sctype2char(img.dtype) in np.typecodes['AllInteger']\n\n\ndef test_imread_separate_channels():\n # Test that imread returns RGBA values contiguously even when they are\n # stored in separate planes.\n x = np.random.rand(3, 16, 8)\n f = NamedTemporaryFile(suffix='.tif')\n fname = f.name\n f.close()\n imsave(fname, x)\n img = imread(fname)\n os.remove(fname)\n assert img.shape == (16, 8, 3), img.shape\n\n\ndef test_imread_multipage_rgb_tif():\n img = imread(os.path.join(data_dir, 'multipage_rgb.tif'))\n assert img.shape == (2, 10, 10, 3), img.shape\n\n\ndef test_imread_palette():\n img = imread(os.path.join(data_dir, 'palette_gray.png'))\n assert img.ndim == 2\n img = imread(os.path.join(data_dir, 'palette_color.png'))\n assert img.ndim == 3\n\n\ndef test_imread_index_png_with_alpha():\n # The file `foo3x5x4indexed.png` was created with this array\n # (3x5 is (height)x(width)):\n data = np.array([[[127, 0, 255, 255],\n [127, 0, 255, 255],\n [127, 0, 255, 255],\n [127, 0, 255, 255],\n [127, 0, 255, 255]],\n [[192, 192, 255, 0],\n [192, 192, 255, 0],\n [0, 0, 255, 0],\n [0, 0, 255, 0],\n [0, 0, 255, 0]],\n [[0, 31, 255, 255],\n [0, 31, 255, 255],\n [0, 31, 255, 255],\n [0, 31, 255, 255],\n [0, 31, 255, 255]]], dtype=np.uint8)\n img = imread(os.path.join(data_dir, 'foo3x5x4indexed.png'))\n assert_array_equal(img, data)\n\n\ndef test_palette_is_gray():\n gray = Image.open(os.path.join(data_dir, 'palette_gray.png'))\n assert _palette_is_grayscale(gray)\n color = Image.open(os.path.join(data_dir, 'palette_color.png'))\n assert not _palette_is_grayscale(color)\n\n\ndef test_bilevel():\n expected = np.zeros((10, 10))\n expected[::2] = 255\n\n img = imread(os.path.join(data_dir, 'checker_bilevel.png'))\n assert_array_equal(img, expected)\n\n\ndef test_imread_uint16():\n expected = np.load(os.path.join(data_dir, 'chessboard_GRAY_U8.npy'))\n img = imread(os.path.join(data_dir, 'chessboard_GRAY_U16.tif'))\n assert np.issubdtype(img.dtype, np.uint16)\n assert_array_almost_equal(img, expected)\n\n\ndef test_imread_truncated_jpg():\n assert_raises((IOError, ValueError), imread,\n os.path.join(data_dir, 'truncated.jpg'))\n\n\ndef 
test_jpg_quality_arg():\n chessboard = np.load(os.path.join(data_dir, 'chessboard_GRAY_U8.npy'))\n with temporary_file(suffix='.jpg') as jpg:\n imsave(jpg, chessboard, quality=95)\n im = imread(jpg)\n sim = ssim(chessboard, im,\n dynamic_range=chessboard.max() - chessboard.min())\n assert sim > 0.99\n\n\ndef test_imread_uint16_big_endian():\n expected = np.load(os.path.join(data_dir, 'chessboard_GRAY_U8.npy'))\n img = imread(os.path.join(data_dir, 'chessboard_GRAY_U16B.tif'))\n assert img.dtype == np.uint16\n assert_array_almost_equal(img, expected)\n\n\nclass TestSave:\n def roundtrip_file(self, x):\n with temporary_file(suffix='.png') as fname:\n imsave(fname, x)\n y = imread(fname)\n return y\n\n def roundtrip_pil_image(self, x):\n pil_image = ndarray_to_pil(x)\n y = pil_to_ndarray(pil_image)\n return y\n\n def verify_roundtrip(self, dtype, x, y, scaling=1):\n assert_array_almost_equal((x * scaling).astype(np.int32), y)\n\n def verify_imsave_roundtrip(self, roundtrip_function):\n for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]:\n for dtype in (np.uint8, np.uint16, np.float32, np.float64):\n x = np.ones(shape, dtype=dtype) * np.random.rand(*shape)\n\n if np.issubdtype(dtype, float):\n yield (self.verify_roundtrip, dtype, x,\n roundtrip_function(x), 255)\n else:\n x = (x * 255).astype(dtype)\n yield (self.verify_roundtrip, dtype, x,\n roundtrip_function(x))\n\n def test_imsave_roundtrip_file(self):\n self.verify_imsave_roundtrip(self.roundtrip_file)\n\n def test_imsave_roundtrip_pil_image(self):\n self.verify_imsave_roundtrip(self.roundtrip_pil_image)\n\n\ndef test_imsave_filelike():\n shape = (2, 2)\n image = np.zeros(shape)\n s = BytesIO()\n\n # save to file-like object\n with expected_warnings(['precision loss',\n 'is a low contrast image']):\n imsave(s, image)\n\n # read from file-like object\n s.seek(0)\n out = imread(s)\n assert out.shape == shape\n assert_allclose(out, image)\n\n\ndef test_imexport_imimport():\n shape = (2, 2)\n image = np.zeros(shape)\n with expected_warnings(['precision loss']):\n pil_image = ndarray_to_pil(image)\n out = pil_to_ndarray(pil_image)\n assert out.shape == shape\n\n\ndef test_all_color():\n color_check('pil')\n color_check('pil', 'bmp')\n\n\ndef test_all_mono():\n mono_check('pil')\n\n\ndef test_multi_page_gif():\n img = imread(os.path.join(data_dir, 'no_time_for_that_tiny.gif'))\n assert img.shape == (24, 25, 14, 3), img.shape\n img2 = imread(os.path.join(data_dir, 'no_time_for_that_tiny.gif'),\n img_num=5)\n assert img2.shape == (25, 14, 3)\n assert_allclose(img[5], img2)\n\n\ndef test_cmyk():\n ref = imread(os.path.join(data_dir, 'color.png'))\n\n img = Image.open(os.path.join(data_dir, 'color.png'))\n img = img.convert('CMYK')\n\n f = NamedTemporaryFile(suffix='.jpg')\n fname = f.name\n f.close()\n img.save(fname)\n try:\n img.close()\n except AttributeError: # `close` not available on PIL\n pass\n\n new = imread(fname)\n\n ref_lab = rgb2lab(ref)\n new_lab = rgb2lab(new)\n\n for i in range(3):\n newi = np.ascontiguousarray(new_lab[:, :, i])\n refi = np.ascontiguousarray(ref_lab[:, :, i])\n sim = ssim(refi, newi, dynamic_range=refi.max() - refi.min())\n assert sim > 0.99\n\nif __name__ == \"__main__\":\n run_module_suite()\n", "import six\nimport math\nimport numpy as np\nfrom scipy import spatial\nfrom scipy import ndimage as ndi\n\nfrom .._shared.utils import (get_bound_method_class, safe_as_int,\n _mode_deprecations, warn)\nfrom ..util import img_as_float\n\nfrom ._warps_cy import _warp_fast\n\n\ndef _to_ndimage_mode(mode):\n \"\"\"Convert 
from `numpy.pad` mode name to the corresponding ndimage mode.\"\"\"\n mode = _mode_deprecations(mode.lower())\n mode_translation_dict = dict(edge='nearest', symmetric='reflect',\n reflect='mirror')\n if mode in mode_translation_dict:\n mode = mode_translation_dict[mode]\n return mode\n\n\ndef _center_and_normalize_points(points):\n \"\"\"Center and normalize image points.\n\n The points are transformed in a two-step procedure that is expressed\n as a transformation matrix. The matrix of the resulting points is usually\n better conditioned than the matrix of the original points.\n\n Center the image points, such that the new coordinate system has its\n origin at the centroid of the image points.\n\n Normalize the image points, such that the mean distance from the points\n to the origin of the coordinate system is sqrt(2).\n\n Parameters\n ----------\n points : (N, 2) array\n The coordinates of the image points.\n\n Returns\n -------\n matrix : (3, 3) array\n The transformation matrix to obtain the new points.\n new_points : (N, 2) array\n The transformed image points.\n\n \"\"\"\n\n centroid = np.mean(points, axis=0)\n\n rms = math.sqrt(np.sum((points - centroid) ** 2) / points.shape[0])\n\n norm_factor = math.sqrt(2) / rms\n\n matrix = np.array([[norm_factor, 0, -norm_factor * centroid[0]],\n [0, norm_factor, -norm_factor * centroid[1]],\n [0, 0, 1]])\n\n pointsh = np.row_stack([points.T, np.ones((points.shape[0]),)])\n\n new_pointsh = np.dot(matrix, pointsh).T\n\n new_points = new_pointsh[:, :2]\n new_points[:, 0] /= new_pointsh[:, 2]\n new_points[:, 1] /= new_pointsh[:, 2]\n\n return matrix, new_points\n\n\nclass GeometricTransform(object):\n \"\"\"Perform geometric transformations on a set of coordinates.\n\n \"\"\"\n def __call__(self, coords):\n \"\"\"Apply forward transformation.\n\n Parameters\n ----------\n coords : (N, 2) array\n Source coordinates.\n\n Returns\n -------\n coords : (N, 2) array\n Transformed coordinates.\n\n \"\"\"\n raise NotImplementedError()\n\n def inverse(self, coords):\n \"\"\"Apply inverse transformation.\n\n Parameters\n ----------\n coords : (N, 2) array\n Source coordinates.\n\n Returns\n -------\n coords : (N, 2) array\n Transformed coordinates.\n\n \"\"\"\n raise NotImplementedError()\n\n def residuals(self, src, dst):\n \"\"\"Determine residuals of transformed destination coordinates.\n\n For each transformed source coordinate the euclidean distance to the\n respective destination coordinate is determined.\n\n Parameters\n ----------\n src : (N, 2) array\n Source coordinates.\n dst : (N, 2) array\n Destination coordinates.\n\n Returns\n -------\n residuals : (N, ) array\n Residual for coordinate.\n\n \"\"\"\n\n return np.sqrt(np.sum((self(src) - dst)**2, axis=1))\n\n def __add__(self, other):\n \"\"\"Combine this transformation with another.\n\n \"\"\"\n raise NotImplementedError()\n\n\nclass ProjectiveTransform(GeometricTransform):\n \"\"\"Matrix transformation.\n\n Apply a projective transformation (homography) on coordinates.\n\n For each homogeneous coordinate :math:`\\mathbf{x} = [x, y, 1]^T`, its\n target position is calculated by multiplying with the given matrix,\n :math:`H`, to give :math:`H \\mathbf{x}`::\n\n [[a0 a1 a2]\n [b0 b1 b2]\n [c0 c1 1 ]].\n\n E.g., to rotate by theta degrees clockwise, the matrix should be::\n\n [[cos(theta) -sin(theta) 0]\n [sin(theta) cos(theta) 0]\n [0 0 1]]\n\n or, to translate x by 10 and y by 20::\n\n [[1 0 10]\n [0 1 20]\n [0 0 1 ]].\n\n Parameters\n ----------\n matrix : (3, 3) array, optional\n Homogeneous 
transformation matrix.\n\n Attributes\n ----------\n params : (3, 3) array\n Homogeneous transformation matrix.\n\n \"\"\"\n\n _coeffs = range(8)\n\n def __init__(self, matrix=None):\n if matrix is None:\n # default to an identity transform\n matrix = np.eye(3)\n if matrix.shape != (3, 3):\n raise ValueError(\"invalid shape of transformation matrix\")\n self.params = matrix\n\n @property\n def _inv_matrix(self):\n return np.linalg.inv(self.params)\n\n def _apply_mat(self, coords, matrix):\n coords = np.array(coords, copy=False, ndmin=2)\n\n x, y = np.transpose(coords)\n src = np.vstack((x, y, np.ones_like(x)))\n dst = np.dot(src.transpose(), matrix.transpose())\n\n # rescale to homogeneous coordinates\n dst[:, 0] /= dst[:, 2]\n dst[:, 1] /= dst[:, 2]\n\n return dst[:, :2]\n\n def __call__(self, coords):\n return self._apply_mat(coords, self.params)\n\n def inverse(self, coords):\n \"\"\"Apply inverse transformation.\n\n Parameters\n ----------\n coords : (N, 2) array\n Source coordinates.\n\n Returns\n -------\n coords : (N, 2) array\n Transformed coordinates.\n\n \"\"\"\n return self._apply_mat(coords, self._inv_matrix)\n\n def estimate(self, src, dst):\n \"\"\"Set the transformation matrix with the explicit transformation\n parameters.\n\n You can determine the over-, well- and under-determined parameters\n with the total least-squares method.\n\n Number of source and destination coordinates must match.\n\n The transformation is defined as::\n\n X = (a0*x + a1*y + a2) / (c0*x + c1*y + 1)\n Y = (b0*x + b1*y + b2) / (c0*x + c1*y + 1)\n\n These equations can be transformed to the following form::\n\n 0 = a0*x + a1*y + a2 - c0*x*X - c1*y*X - X\n 0 = b0*x + b1*y + b2 - c0*x*Y - c1*y*Y - Y\n\n which exist for each set of corresponding points, so we have a set of\n N * 2 equations. 
The coefficients appear linearly so we can write\n A x = 0, where::\n\n A = [[x y 1 0 0 0 -x*X -y*X -X]\n [0 0 0 x y 1 -x*Y -y*Y -Y]\n ...\n ...\n ]\n x.T = [a0 a1 a2 b0 b1 b2 c0 c1 c3]\n\n In case of total least-squares the solution of this homogeneous system\n of equations is the right singular vector of A which corresponds to the\n smallest singular value normed by the coefficient c3.\n\n In case of the affine transformation the coefficients c0 and c1 are 0.\n Thus the system of equations is::\n\n A = [[x y 1 0 0 0 -X]\n [0 0 0 x y 1 -Y]\n ...\n ...\n ]\n x.T = [a0 a1 a2 b0 b1 b2 c3]\n\n Parameters\n ----------\n src : (N, 2) array\n Source coordinates.\n dst : (N, 2) array\n Destination coordinates.\n\n Returns\n -------\n success : bool\n True, if model estimation succeeds.\n\n \"\"\"\n\n try:\n src_matrix, src = _center_and_normalize_points(src)\n dst_matrix, dst = _center_and_normalize_points(dst)\n except ZeroDivisionError:\n self.params = np.nan * np.empty((3, 3))\n return False\n\n xs = src[:, 0]\n ys = src[:, 1]\n xd = dst[:, 0]\n yd = dst[:, 1]\n rows = src.shape[0]\n\n # params: a0, a1, a2, b0, b1, b2, c0, c1\n A = np.zeros((rows * 2, 9))\n A[:rows, 0] = xs\n A[:rows, 1] = ys\n A[:rows, 2] = 1\n A[:rows, 6] = - xd * xs\n A[:rows, 7] = - xd * ys\n A[rows:, 3] = xs\n A[rows:, 4] = ys\n A[rows:, 5] = 1\n A[rows:, 6] = - yd * xs\n A[rows:, 7] = - yd * ys\n A[:rows, 8] = xd\n A[rows:, 8] = yd\n\n # Select relevant columns, depending on params\n A = A[:, list(self._coeffs) + [8]]\n\n _, _, V = np.linalg.svd(A)\n\n H = np.zeros((3, 3))\n # solution is right singular vector that corresponds to smallest\n # singular value\n H.flat[list(self._coeffs) + [8]] = - V[-1, :-1] / V[-1, -1]\n H[2, 2] = 1\n\n # De-center and de-normalize\n H = np.dot(np.linalg.inv(dst_matrix), np.dot(H, src_matrix))\n\n self.params = H\n\n return True\n\n def __add__(self, other):\n \"\"\"Combine this transformation with another.\n\n \"\"\"\n if isinstance(other, ProjectiveTransform):\n # combination of the same types result in a transformation of this\n # type again, otherwise use general projective transformation\n if type(self) == type(other):\n tform = self.__class__\n else:\n tform = ProjectiveTransform\n return tform(other.params.dot(self.params))\n elif (hasattr(other, '__name__')\n and other.__name__ == 'inverse'\n and hasattr(get_bound_method_class(other), '_inv_matrix')):\n return ProjectiveTransform(self._inv_matrix.dot(self.params))\n else:\n raise TypeError(\"Cannot combine transformations of differing \"\n \"types.\")\n\n\nclass AffineTransform(ProjectiveTransform):\n\n \"\"\"2D affine transformation of the form:\n\n ..:math:\n\n X = a0*x + a1*y + a2 =\n = sx*x*cos(rotation) - sy*y*sin(rotation + shear) + a2\n\n Y = b0*x + b1*y + b2 =\n = sx*x*sin(rotation) + sy*y*cos(rotation + shear) + b2\n\n where ``sx`` and ``sy`` are zoom factors in the x and y directions,\n and the homogeneous transformation matrix is::\n\n [[a0 a1 a2]\n [b0 b1 b2]\n [0 0 1]]\n\n Parameters\n ----------\n matrix : (3, 3) array, optional\n Homogeneous transformation matrix.\n scale : (sx, sy) as array, list or tuple, optional\n Scale factors.\n rotation : float, optional\n Rotation angle in counter-clockwise direction as radians.\n shear : float, optional\n Shear angle in counter-clockwise direction as radians.\n translation : (tx, ty) as array, list or tuple, optional\n Translation parameters.\n\n Attributes\n ----------\n params : (3, 3) array\n Homogeneous transformation matrix.\n\n \"\"\"\n\n _coeffs = range(6)\n\n def 
__init__(self, matrix=None, scale=None, rotation=None, shear=None,\n translation=None):\n params = any(param is not None\n for param in (scale, rotation, shear, translation))\n\n if params and matrix is not None:\n raise ValueError(\"You cannot specify the transformation matrix and\"\n \" the implicit parameters at the same time.\")\n elif matrix is not None:\n if matrix.shape != (3, 3):\n raise ValueError(\"Invalid shape of transformation matrix.\")\n self.params = matrix\n elif params:\n if scale is None:\n scale = (1, 1)\n if rotation is None:\n rotation = 0\n if shear is None:\n shear = 0\n if translation is None:\n translation = (0, 0)\n\n sx, sy = scale\n self.params = np.array([\n [sx * math.cos(rotation), -sy * math.sin(rotation + shear), 0],\n [sx * math.sin(rotation), sy * math.cos(rotation + shear), 0],\n [ 0, 0, 1]\n ])\n self.params[0:2, 2] = translation\n else:\n # default to an identity transform\n self.params = np.eye(3)\n\n @property\n def scale(self):\n sx = math.sqrt(self.params[0, 0] ** 2 + self.params[1, 0] ** 2)\n sy = math.sqrt(self.params[0, 1] ** 2 + self.params[1, 1] ** 2)\n return sx, sy\n\n @property\n def rotation(self):\n return math.atan2(self.params[1, 0], self.params[0, 0])\n\n @property\n def shear(self):\n beta = math.atan2(- self.params[0, 1], self.params[1, 1])\n return beta - self.rotation\n\n @property\n def translation(self):\n return self.params[0:2, 2]\n\n\nclass PiecewiseAffineTransform(GeometricTransform):\n\n \"\"\"2D piecewise affine transformation.\n\n Control points are used to define the mapping. The transform is based on\n a Delaunay triangulation of the points to form a mesh. Each triangle is\n used to find a local affine transform.\n\n Attributes\n ----------\n affines : list of AffineTransform objects\n Affine transformations for each triangle in the mesh.\n inverse_affines : list of AffineTransform objects\n Inverse affine transformations for each triangle in the mesh.\n\n \"\"\"\n\n def __init__(self):\n self._tesselation = None\n self._inverse_tesselation = None\n self.affines = None\n self.inverse_affines = None\n\n def estimate(self, src, dst):\n \"\"\"Set the control points with which to perform the piecewise mapping.\n\n Number of source and destination coordinates must match.\n\n Parameters\n ----------\n src : (N, 2) array\n Source coordinates.\n dst : (N, 2) array\n Destination coordinates.\n\n Returns\n -------\n success : bool\n True, if model estimation succeeds.\n\n \"\"\"\n\n # forward piecewise affine\n # triangulate input positions into mesh\n self._tesselation = spatial.Delaunay(src)\n # find affine mapping from source positions to destination\n self.affines = []\n for tri in self._tesselation.vertices:\n affine = AffineTransform()\n affine.estimate(src[tri, :], dst[tri, :])\n self.affines.append(affine)\n\n # inverse piecewise affine\n # triangulate input positions into mesh\n self._inverse_tesselation = spatial.Delaunay(dst)\n # find affine mapping from source positions to destination\n self.inverse_affines = []\n for tri in self._inverse_tesselation.vertices:\n affine = AffineTransform()\n affine.estimate(dst[tri, :], src[tri, :])\n self.inverse_affines.append(affine)\n\n return True\n\n def __call__(self, coords):\n \"\"\"Apply forward transformation.\n\n Coordinates outside of the mesh will be set to `- 1`.\n\n Parameters\n ----------\n coords : (N, 2) array\n Source coordinates.\n\n Returns\n -------\n coords : (N, 2) array\n Transformed coordinates.\n\n \"\"\"\n\n out = np.empty_like(coords, np.double)\n\n # 
determine triangle index for each coordinate\n simplex = self._tesselation.find_simplex(coords)\n\n # coordinates outside of mesh\n out[simplex == -1, :] = -1\n\n for index in range(len(self._tesselation.vertices)):\n # affine transform for triangle\n affine = self.affines[index]\n # all coordinates within triangle\n index_mask = simplex == index\n\n out[index_mask, :] = affine(coords[index_mask, :])\n\n return out\n\n def inverse(self, coords):\n \"\"\"Apply inverse transformation.\n\n Coordinates outside of the mesh will be set to `- 1`.\n\n Parameters\n ----------\n coords : (N, 2) array\n Source coordinates.\n\n Returns\n -------\n coords : (N, 2) array\n Transformed coordinates.\n\n \"\"\"\n\n out = np.empty_like(coords, np.double)\n\n # determine triangle index for each coordinate\n simplex = self._inverse_tesselation.find_simplex(coords)\n\n # coordinates outside of mesh\n out[simplex == -1, :] = -1\n\n for index in range(len(self._inverse_tesselation.vertices)):\n # affine transform for triangle\n affine = self.inverse_affines[index]\n # all coordinates within triangle\n index_mask = simplex == index\n\n out[index_mask, :] = affine(coords[index_mask, :])\n\n return out\n\n\nclass SimilarityTransform(ProjectiveTransform):\n \"\"\"2D similarity transformation of the form:\n\n ..:math:\n\n X = a0 * x - b0 * y + a1 =\n = m * x * cos(rotation) - m * y * sin(rotation) + a1\n\n Y = b0 * x + a0 * y + b1 =\n = m * x * sin(rotation) + m * y * cos(rotation) + b1\n\n where ``m`` is a zoom factor and the homogeneous transformation matrix is::\n\n [[a0 b0 a1]\n [b0 a0 b1]\n [0 0 1]]\n\n Parameters\n ----------\n matrix : (3, 3) array, optional\n Homogeneous transformation matrix.\n scale : float, optional\n Scale factor.\n rotation : float, optional\n Rotation angle in counter-clockwise direction as radians.\n translation : (tx, ty) as array, list or tuple, optional\n x, y translation parameters.\n\n Attributes\n ----------\n params : (3, 3) array\n Homogeneous transformation matrix.\n\n \"\"\"\n\n def __init__(self, matrix=None, scale=None, rotation=None,\n translation=None):\n params = any(param is not None\n for param in (scale, rotation, translation))\n\n if params and matrix is not None:\n raise ValueError(\"You cannot specify the transformation matrix and\"\n \" the implicit parameters at the same time.\")\n elif matrix is not None:\n if matrix.shape != (3, 3):\n raise ValueError(\"Invalid shape of transformation matrix.\")\n self.params = matrix\n elif params:\n if scale is None:\n scale = 1\n if rotation is None:\n rotation = 0\n if translation is None:\n translation = (0, 0)\n\n self.params = np.array([\n [math.cos(rotation), - math.sin(rotation), 0],\n [math.sin(rotation), math.cos(rotation), 0],\n [ 0, 0, 1]\n ])\n self.params[0:2, 0:2] *= scale\n self.params[0:2, 2] = translation\n else:\n # default to an identity transform\n self.params = np.eye(3)\n\n def estimate(self, src, dst):\n \"\"\"Set the transformation matrix with the explicit parameters.\n\n You can determine the over-, well- and under-determined parameters\n with the total least-squares method.\n\n Number of source and destination coordinates must match.\n\n The transformation is defined as::\n\n X = a0 * x - b0 * y + a1\n Y = b0 * x + a0 * y + b1\n\n These equations can be transformed to the following form::\n\n 0 = a0 * x - b0 * y + a1 - X\n 0 = b0 * x + a0 * y + b1 - Y\n\n which exist for each set of corresponding points, so we have a set of\n N * 2 equations. 
The coefficients appear linearly so we can write\n A x = 0, where::\n\n A = [[x 1 -y 0 -X]\n [y 0 x 1 -Y]\n ...\n ...\n ]\n x.T = [a0 a1 b0 b1 c3]\n\n In case of total least-squares the solution of this homogeneous system\n of equations is the right singular vector of A which corresponds to the\n smallest singular value normed by the coefficient c3.\n\n Parameters\n ----------\n src : (N, 2) array\n Source coordinates.\n dst : (N, 2) array\n Destination coordinates.\n\n Returns\n -------\n success : bool\n True, if model estimation succeeds.\n\n \"\"\"\n\n try:\n src_matrix, src = _center_and_normalize_points(src)\n dst_matrix, dst = _center_and_normalize_points(dst)\n except ZeroDivisionError:\n self.params = np.nan * np.empty((3, 3))\n return False\n\n xs = src[:, 0]\n ys = src[:, 1]\n xd = dst[:, 0]\n yd = dst[:, 1]\n rows = src.shape[0]\n\n # params: a0, a1, b0, b1\n A = np.zeros((rows * 2, 5))\n A[:rows, 0] = xs\n A[:rows, 2] = - ys\n A[:rows, 1] = 1\n A[rows:, 2] = xs\n A[rows:, 0] = ys\n A[rows:, 3] = 1\n A[:rows, 4] = xd\n A[rows:, 4] = yd\n\n _, _, V = np.linalg.svd(A)\n\n # solution is right singular vector that corresponds to smallest\n # singular value\n a0, a1, b0, b1 = - V[-1, :-1] / V[-1, -1]\n\n S = np.array([[a0, -b0, a1],\n [b0, a0, b1],\n [ 0, 0, 1]])\n\n # De-center and de-normalize\n S = np.dot(np.linalg.inv(dst_matrix), np.dot(S, src_matrix))\n\n self.params = S\n\n return True\n\n @property\n def scale(self):\n if abs(math.cos(self.rotation)) < np.spacing(1):\n # sin(self.rotation) == 1\n scale = self.params[1, 0]\n else:\n scale = self.params[0, 0] / math.cos(self.rotation)\n return scale\n\n @property\n def rotation(self):\n return math.atan2(self.params[1, 0], self.params[1, 1])\n\n @property\n def translation(self):\n return self.params[0:2, 2]\n\n\nclass PolynomialTransform(GeometricTransform):\n \"\"\"2D transformation of the form:\n\n ..:math:\n\n X = sum[j=0:order]( sum[i=0:j]( a_ji * x**(j - i) * y**i ))\n Y = sum[j=0:order]( sum[i=0:j]( b_ji * x**(j - i) * y**i ))\n\n Parameters\n ----------\n params : (2, N) array, optional\n Polynomial coefficients where `N * 2 = (order + 1) * (order + 2)`. So,\n a_ji is defined in `params[0, :]` and b_ji in `params[1, :]`.\n\n Attributes\n ----------\n params : (2, N) array\n Polynomial coefficients where `N * 2 = (order + 1) * (order + 2)`. So,\n a_ji is defined in `params[0, :]` and b_ji in `params[1, :]`.\n\n \"\"\"\n\n def __init__(self, params=None):\n if params is None:\n # default to transformation which preserves original coordinates\n params = np.array([[0, 1, 0], [0, 0, 1]])\n if params.shape[0] != 2:\n raise ValueError(\"invalid shape of transformation parameters\")\n self.params = params\n\n def estimate(self, src, dst, order=2):\n \"\"\"Set the transformation matrix with the explicit transformation\n parameters.\n\n You can determine the over-, well- and under-determined parameters\n with the total least-squares method.\n\n Number of source and destination coordinates must match.\n\n The transformation is defined as::\n\n X = sum[j=0:order]( sum[i=0:j]( a_ji * x**(j - i) * y**i ))\n Y = sum[j=0:order]( sum[i=0:j]( b_ji * x**(j - i) * y**i ))\n\n These equations can be transformed to the following form::\n\n 0 = sum[j=0:order]( sum[i=0:j]( a_ji * x**(j - i) * y**i )) - X\n 0 = sum[j=0:order]( sum[i=0:j]( b_ji * x**(j - i) * y**i )) - Y\n\n which exist for each set of corresponding points, so we have a set of\n N * 2 equations. 
The coefficients appear linearly so we can write\n A x = 0, where::\n\n A = [[1 x y x**2 x*y y**2 ... 0 ... 0 -X]\n [0 ... 0 1 x y x**2 x*y y**2 -Y]\n ...\n ...\n ]\n x.T = [a00 a10 a11 a20 a21 a22 ... ann\n b00 b10 b11 b20 b21 b22 ... bnn c3]\n\n In case of total least-squares the solution of this homogeneous system\n of equations is the right singular vector of A which corresponds to the\n smallest singular value normed by the coefficient c3.\n\n Parameters\n ----------\n src : (N, 2) array\n Source coordinates.\n dst : (N, 2) array\n Destination coordinates.\n order : int, optional\n Polynomial order (number of coefficients is order + 1).\n\n Returns\n -------\n success : bool\n True, if model estimation succeeds.\n\n \"\"\"\n xs = src[:, 0]\n ys = src[:, 1]\n xd = dst[:, 0]\n yd = dst[:, 1]\n rows = src.shape[0]\n\n # number of unknown polynomial coefficients\n order = safe_as_int(order)\n u = (order + 1) * (order + 2)\n\n A = np.zeros((rows * 2, u + 1))\n pidx = 0\n for j in range(order + 1):\n for i in range(j + 1):\n A[:rows, pidx] = xs ** (j - i) * ys ** i\n A[rows:, pidx + u // 2] = xs ** (j - i) * ys ** i\n pidx += 1\n\n A[:rows, -1] = xd\n A[rows:, -1] = yd\n\n _, _, V = np.linalg.svd(A)\n\n # solution is right singular vector that corresponds to smallest\n # singular value\n params = - V[-1, :-1] / V[-1, -1]\n\n self.params = params.reshape((2, u // 2))\n\n return True\n\n def __call__(self, coords):\n \"\"\"Apply forward transformation.\n\n Parameters\n ----------\n coords : (N, 2) array\n source coordinates\n\n Returns\n -------\n coords : (N, 2) array\n Transformed coordinates.\n\n \"\"\"\n x = coords[:, 0]\n y = coords[:, 1]\n u = len(self.params.ravel())\n # number of coefficients -> u = (order + 1) * (order + 2)\n order = int((- 3 + math.sqrt(9 - 4 * (2 - u))) / 2)\n dst = np.zeros(coords.shape)\n\n pidx = 0\n for j in range(order + 1):\n for i in range(j + 1):\n dst[:, 0] += self.params[0, pidx] * x ** (j - i) * y ** i\n dst[:, 1] += self.params[1, pidx] * x ** (j - i) * y ** i\n pidx += 1\n\n return dst\n\n def inverse(self, coords):\n raise Exception(\n 'There is no explicit way to do the inverse polynomial '\n 'transformation. 
Instead, estimate the inverse transformation '\n 'parameters by exchanging source and destination coordinates,'\n 'then apply the forward transformation.')\n\n\nTRANSFORMS = {\n 'similarity': SimilarityTransform,\n 'affine': AffineTransform,\n 'piecewise-affine': PiecewiseAffineTransform,\n 'projective': ProjectiveTransform,\n 'polynomial': PolynomialTransform,\n}\n\nHOMOGRAPHY_TRANSFORMS = (\n SimilarityTransform,\n AffineTransform,\n ProjectiveTransform\n)\n\n\ndef estimate_transform(ttype, src, dst, **kwargs):\n \"\"\"Estimate 2D geometric transformation parameters.\n\n You can determine the over-, well- and under-determined parameters\n with the total least-squares method.\n\n Number of source and destination coordinates must match.\n\n Parameters\n ----------\n ttype : {'similarity', 'affine', 'piecewise-affine', 'projective', \\\n 'polynomial'}\n Type of transform.\n kwargs : array or int\n Function parameters (src, dst, n, angle)::\n\n NAME / TTYPE FUNCTION PARAMETERS\n 'similarity' `src, `dst`\n 'affine' `src, `dst`\n 'piecewise-affine' `src, `dst`\n 'projective' `src, `dst`\n 'polynomial' `src, `dst`, `order` (polynomial order,\n default order is 2)\n\n Also see examples below.\n\n Returns\n -------\n tform : :class:`GeometricTransform`\n Transform object containing the transformation parameters and providing\n access to forward and inverse transformation functions.\n\n Examples\n --------\n >>> import numpy as np\n >>> from skimage import transform as tf\n\n >>> # estimate transformation parameters\n >>> src = np.array([0, 0, 10, 10]).reshape((2, 2))\n >>> dst = np.array([12, 14, 1, -20]).reshape((2, 2))\n\n >>> tform = tf.estimate_transform('similarity', src, dst)\n\n >>> np.allclose(tform.inverse(tform(src)), src)\n True\n\n >>> # warp image using the estimated transformation\n >>> from skimage import data\n >>> image = data.camera()\n\n >>> warp(image, inverse_map=tform.inverse) # doctest: +SKIP\n\n >>> # create transformation with explicit parameters\n >>> tform2 = tf.SimilarityTransform(scale=1.1, rotation=1,\n ... translation=(10, 20))\n\n >>> # unite transformations, applied in order from left to right\n >>> tform3 = tform + tform2\n >>> np.allclose(tform3(src), tform2(tform(src)))\n True\n\n \"\"\"\n ttype = ttype.lower()\n if ttype not in TRANSFORMS:\n raise ValueError('the transformation type \\'%s\\' is not'\n 'implemented' % ttype)\n\n tform = TRANSFORMS[ttype]()\n tform.estimate(src, dst, **kwargs)\n\n return tform\n\n\ndef matrix_transform(coords, matrix):\n \"\"\"Apply 2D matrix transform.\n\n Parameters\n ----------\n coords : (N, 2) array\n x, y coordinates to transform\n matrix : (3, 3) array\n Homogeneous transformation matrix.\n\n Returns\n -------\n coords : (N, 2) array\n Transformed coordinates.\n\n \"\"\"\n return ProjectiveTransform(matrix)(coords)\n\n\ndef _stackcopy(a, b):\n \"\"\"Copy b into each color layer of a, such that::\n\n a[:,:,0] = a[:,:,1] = ... 
= b\n\n Parameters\n ----------\n a : (M, N) or (M, N, P) ndarray\n Target array.\n b : (M, N)\n Source array.\n\n Notes\n -----\n Color images are stored as an ``(M, N, 3)`` or ``(M, N, 4)`` arrays.\n\n \"\"\"\n if a.ndim == 3:\n a[:] = b[:, :, np.newaxis]\n else:\n a[:] = b\n\n\ndef warp_coords(coord_map, shape, dtype=np.float64):\n \"\"\"Build the source coordinates for the output of a 2-D image warp.\n\n Parameters\n ----------\n coord_map : callable like GeometricTransform.inverse\n Return input coordinates for given output coordinates.\n Coordinates are in the shape (P, 2), where P is the number\n of coordinates and each element is a ``(row, col)`` pair.\n shape : tuple\n Shape of output image ``(rows, cols[, bands])``.\n dtype : np.dtype or string\n dtype for return value (sane choices: float32 or float64).\n\n Returns\n -------\n coords : (ndim, rows, cols[, bands]) array of dtype `dtype`\n Coordinates for `scipy.ndimage.map_coordinates`, that will yield\n an image of shape (orows, ocols, bands) by drawing from source\n points according to the `coord_transform_fn`.\n\n Notes\n -----\n\n This is a lower-level routine that produces the source coordinates for 2-D\n images used by `warp()`.\n\n It is provided separately from `warp` to give additional flexibility to\n users who would like, for example, to re-use a particular coordinate\n mapping, to use specific dtypes at various points along the the\n image-warping process, or to implement different post-processing logic\n than `warp` performs after the call to `ndi.map_coordinates`.\n\n\n Examples\n --------\n Produce a coordinate map that shifts an image up and to the right:\n\n >>> from skimage import data\n >>> from scipy.ndimage import map_coordinates\n >>>\n >>> def shift_up10_left20(xy):\n ... return xy - np.array([-20, 10])[None, :]\n >>>\n >>> image = data.astronaut().astype(np.float32)\n >>> coords = warp_coords(shift_up10_left20, image.shape)\n >>> warped_image = map_coordinates(image, coords)\n\n \"\"\"\n shape = safe_as_int(shape)\n rows, cols = shape[0], shape[1]\n coords_shape = [len(shape), rows, cols]\n if len(shape) == 3:\n coords_shape.append(shape[2])\n coords = np.empty(coords_shape, dtype=dtype)\n\n # Reshape grid coordinates into a (P, 2) array of (row, col) pairs\n tf_coords = np.indices((cols, rows), dtype=dtype).reshape(2, -1).T\n\n # Map each (row, col) pair to the source image according to\n # the user-provided mapping\n tf_coords = coord_map(tf_coords)\n\n # Reshape back to a (2, M, N) coordinate grid\n tf_coords = tf_coords.T.reshape((-1, cols, rows)).swapaxes(1, 2)\n\n # Place the y-coordinate mapping\n _stackcopy(coords[1, ...], tf_coords[0, ...])\n\n # Place the x-coordinate mapping\n _stackcopy(coords[0, ...], tf_coords[1, ...])\n\n if len(shape) == 3:\n coords[2, ...] 
= range(shape[2])\n\n return coords\n\n\ndef _convert_warp_input(image, preserve_range):\n \"\"\"Convert input image to double image with the appropriate range.\"\"\"\n if preserve_range:\n image = image.astype(np.double)\n else:\n image = img_as_float(image)\n return image\n\n\ndef _clip_warp_output(input_image, output_image, order, mode, cval, clip):\n \"\"\"Clip output image to range of values of input image.\n\n Note that this function modifies the values of `output_image` in-place\n and it is only modified if ``clip=True``.\n\n Parameters\n ----------\n input_image : ndarray\n Input image.\n output_image : ndarray\n Output image, which is modified in-place.\n\n Other parameters\n ----------------\n order : int, optional\n The order of the spline interpolation, default is 1. The order has to\n be in the range 0-5. See `skimage.transform.warp` for detail.\n mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional\n Points outside the boundaries of the input are filled according\n to the given mode. Modes match the behaviour of `numpy.pad`.\n cval : float, optional\n Used in conjunction with mode 'constant', the value outside\n the image boundaries.\n clip : bool, optional\n Whether to clip the output to the range of values of the input image.\n This is enabled by default, since higher order interpolation may\n produce values outside the given input range.\n\n \"\"\"\n if clip and order != 0:\n min_val = input_image.min()\n max_val = input_image.max()\n\n preserve_cval = mode == 'constant' and not \\\n (min_val <= cval <= max_val)\n\n if preserve_cval:\n cval_mask = output_image == cval\n\n np.clip(output_image, min_val, max_val, out=output_image)\n\n if preserve_cval:\n output_image[cval_mask] = cval\n\n\ndef warp(image, inverse_map=None, map_args={}, output_shape=None, order=1,\n mode='constant', cval=0., clip=True, preserve_range=False):\n \"\"\"Warp an image according to a given coordinate transformation.\n\n Parameters\n ----------\n image : ndarray\n Input image.\n inverse_map : transformation object, callable ``cr = f(cr, **kwargs)``, or ndarray\n Inverse coordinate map, which transforms coordinates in the output\n images into their corresponding coordinates in the input image.\n\n There are a number of different options to define this map, depending\n on the dimensionality of the input image. A 2-D image can have 2\n dimensions for gray-scale images, or 3 dimensions with color\n information.\n\n - For 2-D images, you can directly pass a transformation object,\n e.g. `skimage.transform.SimilarityTransform`, or its inverse.\n - For 2-D images, you can pass a ``(3, 3)`` homogeneous\n transformation matrix, e.g.\n `skimage.transform.SimilarityTransform.params`.\n - For 2-D images, a function that transforms a ``(M, 2)`` array of\n ``(col, row)`` coordinates in the output image to their\n corresponding coordinates in the input image. Extra parameters to\n the function can be specified through `map_args`.\n - For N-D images, you can directly pass an array of coordinates.\n The first dimension specifies the coordinates in the input image,\n while the subsequent dimensions determine the position in the\n output image. E.g. 
in case of 2-D images, you need to pass an array\n of shape ``(2, rows, cols)``, where `rows` and `cols` determine the\n shape of the output image, and the first dimension contains the\n ``(row, col)`` coordinate in the input image.\n See `scipy.ndimage.map_coordinates` for further documentation.\n\n Note, that a ``(3, 3)`` matrix is interpreted as a homogeneous\n transformation matrix, so you cannot interpolate values from a 3-D\n input, if the output is of shape ``(3,)``.\n\n See example section for usage.\n map_args : dict, optional\n Keyword arguments passed to `inverse_map`.\n output_shape : tuple (rows, cols), optional\n Shape of the output image generated. By default the shape of the input\n image is preserved. Note that, even for multi-band images, only rows\n and columns need to be specified.\n order : int, optional\n The order of interpolation. The order has to be in the range 0-5:\n - 0: Nearest-neighbor\n - 1: Bi-linear (default)\n - 2: Bi-quadratic\n - 3: Bi-cubic\n - 4: Bi-quartic\n - 5: Bi-quintic\n mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional\n Points outside the boundaries of the input are filled according\n to the given mode. Modes match the behaviour of `numpy.pad`.\n cval : float, optional\n Used in conjunction with mode 'constant', the value outside\n the image boundaries.\n clip : bool, optional\n Whether to clip the output to the range of values of the input image.\n This is enabled by default, since higher order interpolation may\n produce values outside the given input range.\n preserve_range : bool, optional\n Whether to keep the original range of values. Otherwise, the input\n image is converted according to the conventions of `img_as_float`.\n\n Returns\n -------\n warped : double ndarray\n The warped input image.\n\n Notes\n -----\n - The input image is converted to a `double` image.\n - In case of a `SimilarityTransform`, `AffineTransform` and\n `ProjectiveTransform` and `order` in [0, 3] this function uses the\n underlying transformation matrix to warp the image with a much faster\n routine.\n\n Examples\n --------\n >>> from skimage.transform import warp\n >>> from skimage import data\n >>> image = data.camera()\n\n The following image warps are all equal but differ substantially in\n execution time. The image is shifted to the bottom.\n\n Use a geometric transform to warp an image (fast):\n\n >>> from skimage.transform import SimilarityTransform\n >>> tform = SimilarityTransform(translation=(0, -10))\n >>> warped = warp(image, tform)\n\n Use a callable (slow):\n\n >>> def shift_down(xy):\n ... xy[:, 1] -= 10\n ... return xy\n >>> warped = warp(image, shift_down)\n\n Use a transformation matrix to warp an image (fast):\n\n >>> matrix = np.array([[1, 0, 0], [0, 1, -10], [0, 0, 1]])\n >>> warped = warp(image, matrix)\n >>> from skimage.transform import ProjectiveTransform\n >>> warped = warp(image, ProjectiveTransform(matrix=matrix))\n\n You can also use the inverse of a geometric transformation (fast):\n\n >>> warped = warp(image, tform.inverse)\n\n For N-D images you can pass a coordinate array, that specifies the\n coordinates in the input image for every element in the output image. E.g.\n if you want to rescale a 3-D cube, you can do:\n\n >>> cube_shape = np.array([30, 30, 30])\n >>> cube = np.random.rand(*cube_shape)\n\n Setup the coordinate array, that defines the scaling:\n\n >>> scale = 0.1\n >>> output_shape = (scale * cube_shape).astype(int)\n >>> coords0, coords1, coords2 = np.mgrid[:output_shape[0],\n ... 
:output_shape[1], :output_shape[2]]\n >>> coords = np.array([coords0, coords1, coords2])\n\n Assume that the cube contains spatial data, where the first array element\n center is at coordinate (0.5, 0.5, 0.5) in real space, i.e. we have to\n account for this extra offset when scaling the image:\n\n >>> coords = (coords + 0.5) / scale - 0.5\n >>> warped = warp(cube, coords)\n\n \"\"\"\n mode = _mode_deprecations(mode)\n image = _convert_warp_input(image, preserve_range)\n\n input_shape = np.array(image.shape)\n\n if output_shape is None:\n output_shape = input_shape\n else:\n output_shape = safe_as_int(output_shape)\n\n warped = None\n\n if order == 2:\n # When fixing this issue, make sure to fix the branches further\n # below in this function\n warn(\"Bi-quadratic interpolation behavior has changed due \"\n \"to a bug in the implementation of scikit-image. \"\n \"The new version now serves as a wrapper \"\n \"around SciPy's interpolation functions, which itself \"\n \"is not verified to be a correct implementation. Until \"\n \"skimage's implementation is fixed, we recommend \"\n \"to use bi-linear or bi-cubic interpolation instead.\")\n\n if order in (0, 1, 3) and not map_args:\n # use fast Cython version for specific interpolation orders and input\n\n matrix = None\n\n if isinstance(inverse_map, np.ndarray) and inverse_map.shape == (3, 3):\n # inverse_map is a transformation matrix as numpy array\n matrix = inverse_map\n\n elif isinstance(inverse_map, HOMOGRAPHY_TRANSFORMS):\n # inverse_map is a homography\n matrix = inverse_map.params\n\n elif (hasattr(inverse_map, '__name__')\n and inverse_map.__name__ == 'inverse'\n and get_bound_method_class(inverse_map) \\\n in HOMOGRAPHY_TRANSFORMS):\n # inverse_map is the inverse of a homography\n matrix = np.linalg.inv(six.get_method_self(inverse_map).params)\n\n if matrix is not None:\n matrix = matrix.astype(np.double)\n if image.ndim == 2:\n warped = _warp_fast(image, matrix,\n output_shape=output_shape,\n order=order, mode=mode, cval=cval)\n elif image.ndim == 3:\n dims = []\n for dim in range(image.shape[2]):\n dims.append(_warp_fast(image[..., dim], matrix,\n output_shape=output_shape,\n order=order, mode=mode, cval=cval))\n warped = np.dstack(dims)\n\n if warped is None:\n # use ndi.map_coordinates\n\n if (isinstance(inverse_map, np.ndarray)\n and inverse_map.shape == (3, 3)):\n # inverse_map is a transformation matrix as numpy array,\n # this is only used for order >= 4.\n inverse_map = ProjectiveTransform(matrix=inverse_map)\n\n if isinstance(inverse_map, np.ndarray):\n # inverse_map is directly given as coordinates\n coords = inverse_map\n else:\n # inverse_map is given as function, that transforms (N, 2)\n # destination coordinates to their corresponding source\n # coordinates. This is only supported for 2(+1)-D images.\n\n if image.ndim < 2 or image.ndim > 3:\n raise ValueError(\"Only 2-D images (grayscale or color) are \"\n \"supported, when providing a callable \"\n \"`inverse_map`.\")\n\n def coord_map(*args):\n return inverse_map(*args, **map_args)\n\n if len(input_shape) == 3 and len(output_shape) == 2:\n # Input image is 2D and has color channel, but output_shape is\n # given for 2-D images. 
Automatically add the color channel\n # dimensionality.\n output_shape = (output_shape[0], output_shape[1],\n input_shape[2])\n\n coords = warp_coords(coord_map, output_shape)\n\n # Pre-filtering not necessary for order 0, 1 interpolation\n prefilter = order > 1\n\n ndi_mode = _to_ndimage_mode(mode)\n warped = ndi.map_coordinates(image, coords, prefilter=prefilter,\n mode=ndi_mode, order=order, cval=cval)\n\n\n _clip_warp_output(image, warped, order, mode, cval, clip)\n\n return warped\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.asarray", "scipy.fftpack.fft", "numpy.all", "numpy.searchsorted", "scipy.fftpack.fftfreq", "numpy.clip", "numpy.arange", "numpy.sin", "numpy.ceil", "scipy.interpolate.interp1d", "numpy.interp", "numpy.zeros", "scipy.fftpack.ifft", "numpy.deg2rad", "numpy.argsort", "numpy.array", "numpy.log2", "numpy.abs", "numpy.cos" ], [ "numpy.testing.assert_equal", "numpy.testing.run_module_suite", "numpy.fliplr", "numpy.eye", "numpy.set_printoptions", "numpy.flipud", "numpy.testing.assert_array_equal", "numpy.testing.assert_almost_equal", "numpy.testing.assert_raises", "numpy.any", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.empty", "numpy.testing.assert_array_almost_equal" ], [ "numpy.testing.assert_equal", "numpy.testing.run_module_suite", "numpy.linspace", "numpy.random.seed", "numpy.arange", "numpy.ones", "numpy.testing.assert_almost_equal", "numpy.testing.assert_raises", "numpy.float64", "numpy.random.rand", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.testing.run_module_suite", "numpy.abs", "numpy.ascontiguousarray", "numpy.eye", "numpy.issubdtype", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.sctype2char", "numpy.random.rand", "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros", "numpy.testing.assert_array_almost_equal" ], [ "numpy.dot", "numpy.mean", "numpy.linalg.svd", "numpy.ones_like", "numpy.clip", "numpy.empty_like", "numpy.eye", "numpy.zeros", "numpy.spacing", "numpy.linalg.inv", "scipy.ndimage.map_coordinates", "numpy.transpose", "numpy.array", "numpy.sum", "scipy.spatial.Delaunay", "numpy.indices", "numpy.dstack", "numpy.ones", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chrisjsewell/aiida-performance
[ "160606f07fe092a9e2bacdf62bfecec460fac642" ]
[ "tests/db_stats.py" ]
[ "\"\"\"Useful queries for profiling PostgreSQL databases\n\nThese queries are mainly adapted from\nhttps://gist.github.com/anvk/475c22cbca1edc5ce94546c871460fdd\n\"\"\"\nfrom functools import wraps\nfrom pathlib import Path\n\n\ndef execute_raw(raw):\n from aiida.manage.manager import get_manager\n\n backend = get_manager()._load_backend(schema_check=False)\n return backend.execute_raw(raw)\n\n\n# ------------------\n# -- Memory Size --\n# ------------------\n\n\ndef memory_db_df():\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n datname,\n pg_database_size(datname)\n from pg_database\n order by pg_database_size(datname);\n \"\"\"\n )\n df = pd.DataFrame(result, columns=[\"database\", \"size_mb\"])\n df[\"size_mb\"] = df[\"size_mb\"] * 1e-6\n return df\n\n\ndef memory_pg_classes_df():\n \"\"\"Return size of `pg_class`'s\n\n `pg_class` catalogs tables and most everything else that has columns,\n or is otherwise similar to a table.\n See https://www.postgresql.org/docs/9.3/catalog-pg-class.html\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n sum(pg_relation_size(pg_class.oid))::bigint,\n nspname,\n CASE pg_class.relkind\n WHEN 'r' THEN 'table'\n WHEN 'i' THEN 'index'\n WHEN 'S' THEN 'sequence'\n WHEN 'v' THEN 'view'\n WHEN 't' THEN 'toast'\n ELSE pg_class.relkind::text\n END\n FROM pg_class\n LEFT OUTER JOIN pg_namespace ON (pg_namespace.oid = pg_class.relnamespace)\n GROUP BY pg_class.relkind, nspname\n ORDER BY sum(pg_relation_size(pg_class.oid)) DESC;\n \"\"\"\n )\n df = pd.DataFrame(result, columns=[\"size_mb\", \"namespace\", \"relkind\"])\n df.sort_index(axis=1, inplace=True)\n df[\"size_mb\"] = df.size_mb * 1e-6\n return df\n\n\ndef memory_tables_df():\n \"\"\"Return statistics on indices.\n\n See https://www.postgresql.org/docs/current/monitoring-stats.html\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n select\n relname,\n pg_relation_size(relname::regclass) as table_size,\n pg_total_relation_size(relname::regclass) - pg_relation_size(relname::regclass) as index_size,\n pg_total_relation_size(relname::regclass) as total_size\n from pg_stat_user_tables\n \"\"\"\n )\n df = pd.DataFrame(result, columns=[\"name\", \"table_mb\", \"indices_mb\", \"total_mb\"])\n df.set_index(\"name\", inplace=True)\n df = df * 1e-6\n df.sort_values(\"total_mb\", ascending=False, inplace=True)\n return df\n\n\n# -------------\n# -- Indices --\n# -------------\n\n\ndef indices_list_df():\n \"\"\"Return list of indices by table and columns.\"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n select\n t.relname as table_name,\n i.relname as index_name,\n string_agg(a.attname, ',') as column_name\n from\n pg_class t,\n pg_class i,\n pg_index ix,\n pg_attribute a\n where\n t.oid = ix.indrelid\n and i.oid = ix.indexrelid\n and a.attrelid = t.oid\n and a.attnum = ANY(ix.indkey)\n and t.relkind = 'r'\n and t.relname not like 'pg_%'\n group by\n t.relname,\n i.relname\n order by\n t.relname,\n i.relname;\n \"\"\"\n )\n df = pd.DataFrame(result, columns=[\"table\", \"index\", \"columns\"])\n df.set_index([\"table\", \"columns\"], inplace=True)\n return df\n\n\ndef indices_stats_df(sort_size=False, with_sql=False):\n \"\"\"Return statistics on indices.\n\n See https://www.postgresql.org/docs/current/monitoring-stats.html\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n pt.tablename AS TableName,\n t.indexname AS IndexName,\n pc.reltuples AS TotalRows,\n pg_relation_size(quote_ident(pt.tablename)::text) AS 
TableSize,\n pg_relation_size(quote_ident(t.indexrelname)::text) AS IndexSize,\n t.idx_scan AS TotalNumberOfScan,\n t.idx_tup_read AS TotalTupleRead,\n t.idx_tup_fetch AS TotalTupleFetched,\n pgi.indexdef AS IndexDef\n FROM pg_tables AS pt\n LEFT OUTER JOIN pg_class AS pc\n ON pt.tablename=pc.relname\n LEFT OUTER JOIN\n (\n SELECT\n pc.relname AS TableName,\n pc2.relname AS IndexName,\n psai.idx_scan,\n psai.idx_tup_read,\n psai.idx_tup_fetch,\n psai.indexrelname\n FROM\n pg_index AS pi\n JOIN pg_class AS pc\n ON pc.oid = pi.indrelid\n JOIN pg_class AS pc2\n ON pc2.oid = pi.indexrelid\n JOIN pg_stat_all_indexes AS psai\n ON pi.indexrelid = psai.indexrelid\n ) AS T\n ON pt.tablename = T.TableName\n LEFT OUTER JOIN pg_indexes as pgi\n ON T.indexname = pgi.indexname\n WHERE pt.schemaname='public'\n ORDER BY 1;\n \"\"\"\n )\n columns = [\n \"table\",\n \"index\",\n \"rows\",\n \"table_size_mb\",\n \"index_size_mb\",\n # Number of index scans initiated on this index\n \"scans\",\n # Number of index entries returned by scans on this index\n \"read\",\n # Number of live rows fetched by index scans\n \"fetched\",\n \"sql\",\n ]\n df = pd.DataFrame(result, columns=columns)\n df.set_index([\"table\", \"index\"], inplace=True)\n df[\"table_size_mb\"] = df.table_size_mb * 10e-6\n df[\"index_size_mb\"] = df.index_size_mb * 10e-6\n if not with_sql:\n df.drop(\"sql\", axis=1, inplace=True)\n if sort_size:\n df.sort_values(\"index_size_mb\", ascending=False, inplace=True)\n else:\n df.sort_index(axis=0, inplace=True)\n return df\n\n\ndef indices_check_df(min_size_mb=0.1):\n \"\"\"Check for tables that may require an index.\"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n relname,\n seq_scan,\n idx_scan,\n pg_relation_size(relname::regclass) AS rel_size,\n n_live_tup\n FROM pg_stat_all_tables\n WHERE schemaname='public' AND pg_relation_size(relname::regclass)>{min_size};\n \"\"\".format(\n min_size=int(min_size_mb * 1e6)\n )\n )\n df = pd.DataFrame(\n result,\n columns=[\n \"table\",\n # Number of sequential scans initiated on this table\n \"seq_scans\",\n # Number of index scans initiated on this table\n \"idx_scans\",\n \"size_mb\",\n \"live_rows\",\n ],\n )\n df[\"idx_usage\"] = 100 * df.idx_scans / (df.seq_scans + df.idx_scans)\n df[\"idx_required\"] = (df.seq_scans - df.idx_scans) > 0\n df[\"size_mb\"] = df[\"size_mb\"] * 1e-6\n df.set_index(\"table\", inplace=True)\n return df\n\n\n# --------------------\n# -- Data Integrity --\n# --------------------\n\n\ndef cache_hit_ratio():\n \"\"\"Ideally hit_ration should be > 90%\"\"\"\n result = execute_raw(\n r\"\"\"\n SELECT\n sum(blks_hit)*100/sum(blks_hit+blks_read) as hit_ratio\n from pg_stat_database;\n \"\"\"\n )\n return float(result[0][0])\n\n\ndef anomalies_df():\n \"\"\"\n - c_commit_ratio should be > 95%\n - c_rollback_ratio should be < 5%\n - deadlocks should be close to 0\n - conflicts should be close to 0\n - temp_files and temp_bytes watch out for them\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n datname,\n (xact_commit*100)/nullif(xact_commit+xact_rollback,0) as c_commit_ratio,\n (xact_rollback*100)/nullif(xact_commit+xact_rollback, 0) as c_rollback_ratio,\n deadlocks,\n conflicts,\n temp_files,\n temp_bytes\n FROM pg_stat_database;\n \"\"\"\n )\n df = pd.DataFrame(\n result,\n columns=[\n \"database\",\n \"commit_ratio\",\n \"rollback_ratio\",\n \"deadlocks\",\n \"conflicts\",\n \"temp_files\",\n \"temp_size_mb\",\n ],\n )\n df[\"temp_size_mb\"] = df[\"temp_size_mb\"] * 1e-6\n return 
df\n\n\ndef write_activity_df(limit=50):\n \"\"\"\n hot_rate = rows HOT updated / total rows updated\n (Heap Only Tuple means with no separate index update required)\n\n Heap Only Tuple (HOT) means, creating a new update tuple if possible on the same page as the old tuple.\n Ideally hot_rate should be close to 100.\n You might be blocking HOT updates with indexes on updated columns. If those are expendable, you might get better overall performance without them.\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n s.relname,\n pg_relation_size(relid),\n coalesce(n_tup_ins,0) + 2 * coalesce(n_tup_upd,0) -\n coalesce(n_tup_hot_upd,0) + coalesce(n_tup_del,0) AS total_writes,\n (coalesce(n_tup_hot_upd,0)::float * 100 / (case when n_tup_upd > 0 then n_tup_upd else 1 end)::float) AS hot_rate\n /* This returns None\n (SELECT v[1] FROM regexp_matches(reloptions::text,E'fillfactor=(d+)') as r(v) limit 1) AS fillfactor\n */\n from pg_stat_all_tables\n s join pg_class c ON c.oid=relid\n order by total_writes desc\n limit {limit};\n \"\"\".format(\n limit=limit\n )\n )\n columns = [\n \"table\",\n \"size_mb\",\n \"writes\",\n \"hot_rate\",\n # \"fill_factor\"\n ]\n df = pd.DataFrame(result, columns=columns)\n df[\"size_mb\"] = df[\"size_mb\"] * 1e-6\n df.set_index(\"table\", inplace=True)\n return df\n\n\n# How many indexes are in cache\ndef cached_indices():\n result = execute_raw(\n r\"\"\"\n SELECT\n sum(idx_blks_read) as idx_read,\n sum(idx_blks_hit) as idx_hit,\n (sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit) as ratio\n FROM pg_statio_user_indexes;\n \"\"\"\n )\n return cached_indices\n\n\ndef dirty_pages():\n \"\"\"maxwritten_clean and buffers_backend_fsyn should be 0\"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT buffers_clean, maxwritten_clean, buffers_backend_fsync from pg_stat_bgwriter;\n \"\"\"\n )\n return pd.Series(\n dict(\n zip(\n (\"buffers_clean\", \"maxwritten_clean\", \"buffers_backend_fsync\"),\n result[0],\n )\n )\n )\n\n\n# -------------\n# -- Queries --\n# -------------\n\n\ndef requires_pg_stat(func):\n @wraps(func)\n def wrapper(*args, **kwds):\n try:\n return func(*args, **kwds)\n except Exception as err:\n if 'relation \"pg_stat_statements\" does not exist' in str(err):\n raise RuntimeError(\n \"This function requires that the pg_stat_statements extension is initialised on your database\"\n )\n raise\n\n return wrapper\n\n\n@requires_pg_stat\ndef query_reset_stats():\n return execute_raw(\"select pg_stat_statements_reset();\")\n\n\n@requires_pg_stat\ndef query_stats_df(limit=100):\n \"\"\"Return most CPU intensive queries\n\n See: https://www.postgresql.org/docs/9.4/pgstatstatements.html\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n query,\n round(total_time::numeric, 2) AS total_time,\n calls,\n rows,\n round((100 * total_time / sum(total_time::numeric) OVER ())::numeric, 2) AS percentage_cpu\n FROM pg_stat_statements\n ORDER BY total_time DESC\n LIMIT {limit};\n \"\"\".format(\n limit=limit\n )\n )\n # avg_time = total_time / calls\n df = pd.DataFrame(\n result, columns=[\"sql\", \"time_seconds\", \"calls\", \"rows\", \"cpu_percent\"]\n )\n df[\"time_seconds\"] = df[\"time_seconds\"].astype(float) * 1e-6\n df[\"type\"] = df.sql.apply(lambda s: s.split()[0].upper())\n return df\n\n\n@requires_pg_stat\ndef query_write_df():\n \"\"\"Return most writing (to shared_buffers) queries\n\n See: https://www.postgresql.org/docs/9.4/pgstatstatements.html\n \"\"\"\n import pandas as pd\n result = 
execute_raw(\n r\"\"\"\n SELECT\n query,\n shared_blks_dirtied\n from pg_stat_statements\n where shared_blks_dirtied > 0\n order by 2 desc;\n \"\"\"\n )\n return pd.DataFrame(result, columns=[\"sql\", \"blocks_written\"])\n\n\nif __name__ == \"__main__\":\n import argparse, os\n parser = argparse.ArgumentParser()\n parser.add_argument(\"commands\", choices=[\"queries\", \"indices\", \"reset\"], nargs='+')\n parser.add_argument(\"-n\", \"--name\", default=\"test\")\n parser.add_argument(\"-p\", \"--path\", default=os.getcwd())\n args = parser.parse_args()\n\n for _command in args.commands:\n if _command == \"queries\":\n Path(args.path).joinpath(args.name + \"_queries.html\").write_text(query_stats_df().to_html())\n if _command == \"indices\":\n Path(args.path).joinpath(args.name + \"_indices.html\").write_text(indices_stats_df().to_html())\n elif _command == \"reset\":\n query_reset_stats()\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
mnicstruwig/optbinning
[ "6ce991e1ca75b4d41835f3b3bf8e0f294f6ba780" ]
[ "optbinning/binning/piecewise/continuous_binning.py" ]
[ "\"\"\"\nOptimal piecewise binning for continuous target.\n\"\"\"\n\n# Guillermo Navas-Palencia <[email protected]>\n# Copyright (C) 2020\n\nimport time\n\nimport numpy as np\n\nfrom .base import _check_parameters\nfrom .base import BasePWBinning\nfrom .binning_statistics import PWContinuousBinningTable\nfrom .metrics import continuous_metrics\nfrom .transformations import transform_continuous_target\n\n\nclass ContinuousOptimalPWBinning(BasePWBinning):\n \"\"\"Optimal Piecewise binning of a numerical variable with respect to a\n binary target.\n\n Parameters\n ----------\n name : str, optional (default=\"\")\n The variable name.\n\n objective : str, optional (default=\"l2\")\n The objective function. Supported objectives are \"l2\", \"l1\", \"huber\"\n and \"quantile\". Note that \"l1\", \"huber\" and \"quantile\" are robust\n objective functions.\n\n degree : int (default=1)\n The degree of the polynomials.\n\n * degree = 0: piecewise constant functions.\n * degree = 1: piecewise linear functions.\n * degree > 1: piecewise polynomial functions.\n\n continuous : bool (default=True)\n Whether to fit a continuous or discontinuous piecewise regression.\n\n prebinning_method : str, optional (default=\"cart\")\n The pre-binning method. Supported methods are \"cart\" for a CART\n decision tree, \"quantile\" to generate prebins with approximately same\n frequency and \"uniform\" to generate prebins with equal width. Method\n \"cart\" uses `sklearn.tree.DecistionTreeClassifier\n <https://scikit-learn.org/stable/modules/generated/sklearn.tree.\n DecisionTreeClassifier.html>`_.\n\n max_n_prebins : int (default=20)\n The maximum number of bins after pre-binning (prebins).\n\n min_prebin_size : float (default=0.05)\n The fraction of mininum number of records for each prebin.\n\n min_n_bins : int or None, optional (default=None)\n The minimum number of bins. If None, then ``min_n_bins`` is\n a value in ``[0, max_n_prebins]``.\n\n max_n_bins : int or None, optional (default=None)\n The maximum number of bins. If None, then ``max_n_bins`` is\n a value in ``[0, max_n_prebins]``.\n\n min_bin_size : float or None, optional (default=None)\n The fraction of minimum number of records for each bin. If None,\n ``min_bin_size = min_prebin_size``.\n\n max_bin_size : float or None, optional (default=None)\n The fraction of maximum number of records for each bin. If None,\n ``max_bin_size = 1.0``.\n\n monotonic_trend : str or None, optional (default=\"auto\")\n The monotonic trend. Supported trends are “auto”, \"auto_heuristic\" and\n \"auto_asc_desc\" to automatically determine the trend maximizing IV\n using a machine learning classifier, \"ascending\", \"descending\",\n \"concave\", \"convex\", \"peak\" and \"peak_heuristic\" to allow a peak change\n point, and \"valley\" and \"valley_heuristic\" to allow a valley change\n point. Trends \"auto_heuristic\", \"peak_heuristic\" and \"valley_heuristic\"\n use a heuristic to determine the change point, and are significantly\n faster for large size instances (``max_n_prebins > 20``). Trend\n \"auto_asc_desc\" is used to automatically select the best monotonic\n trend between \"ascending\" and \"descending\". If None, then the\n monotonic constraint is disabled.\n\n n_subsamples : int or None (default=None)\n Number of subsamples to fit the piecewise regression algorithm. If\n None, all values are considered.\n\n max_pvalue : float or None, optional (default=0.05)\n The maximum p-value among bins. 
The Z-test is used to detect bins\n not satisfying the p-value constraint. Option supported by solvers\n \"cp\" and \"mip\".\n\n max_pvalue_policy : str, optional (default=\"consecutive\")\n The method to determine bins not satisfying the p-value constraint.\n Supported methods are \"consecutive\" to compare consecutive bins and\n \"all\" to compare all bins.\n\n outlier_detector : str or None, optional (default=None)\n The outlier detection method. Supported methods are \"range\" to use\n the interquartile range based method or \"zcore\" to use the modified\n Z-score method.\n\n outlier_params : dict or None, optional (default=None)\n Dictionary of parameters to pass to the outlier detection method.\n\n user_splits : array-like or None, optional (default=None)\n The list of pre-binning split points when ``dtype`` is \"numerical\" or\n the list of prebins when ``dtype`` is \"categorical\".\n\n user_splits_fixed : array-like or None (default=None)\n The list of pre-binning split points that must be fixed.\n\n special_codes : array-like or None, optional (default=None)\n List of special codes. Use special codes to specify the data values\n that must be treated separately.\n\n split_digits : int or None, optional (default=None)\n The significant digits of the split points. If ``split_digits`` is set\n to 0, the split points are integers. If None, then all significant\n digits in the split points are considered.\n\n solver : str, optional (default=\"auto\")\n The optimizer to solve the underlying mathematical optimization\n problem. Supported solvers are `\"ecos\"\n <https://github.com/embotech/ecos>`_, `\"osqp\"\n <https://github.com/oxfordcontrol/osqp>`_, \"direct\", to choose the\n direct solver, and \"auto\", to choose the most appropriate solver for\n the problem.\n\n h_epsilon: float (default=1.35)\n The parameter h_epsilon used when ``objective=\"huber\"``, controls the\n number of samples that should be classified as outliers.\n\n quantile : float (default=0.5)\n The parameter quantile is the q-th quantile to be used when\n ``objective=\"quantile\"``.\n\n regularization: str or None (default=None)\n Type of regularization. Supported regularization are \"l1\" (Lasso) and\n \"l2\" (Ridge). If None, no regularization is applied.\n\n reg_l1 : float (default=1.0)\n L1 regularization term. Increasing this value will smooth the\n regression model. Only applicable if ``regularization=\"l1\"``.\n\n reg_l2 : float (default=1.0)\n L2 regularization term. Increasing this value will smooth the\n regression model. 
Only applicable if ``regularization=\"l2\"``.\n\n random_state : int, RandomState instance or None, (default=None)\n If ``n_subsamples < n_samples``, controls the shuffling applied to the\n data before applying the split.\n\n verbose : bool (default=False)\n Enable verbose output.\n \"\"\"\n def __init__(self, name=\"\", objective=\"l2\", degree=1,\n continuous=True, prebinning_method=\"cart\", max_n_prebins=20,\n min_prebin_size=0.05, min_n_bins=None, max_n_bins=None,\n min_bin_size=None, max_bin_size=None, monotonic_trend=\"auto\",\n n_subsamples=None, max_pvalue=None,\n max_pvalue_policy=\"consecutive\", outlier_detector=None,\n outlier_params=None, user_splits=None, user_splits_fixed=None,\n special_codes=None, split_digits=None, solver=\"auto\",\n h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0,\n reg_l2=1.0, random_state=None, verbose=False):\n\n super().__init__(name, None, objective, degree, continuous,\n prebinning_method, max_n_prebins, min_prebin_size,\n min_n_bins, max_n_bins, min_bin_size, max_bin_size,\n monotonic_trend, n_subsamples, max_pvalue,\n max_pvalue_policy, outlier_detector, outlier_params,\n user_splits, user_splits_fixed, special_codes,\n split_digits, solver, h_epsilon, quantile,\n regularization, reg_l1, reg_l2, random_state, verbose)\n\n self._problem_type = \"regression\"\n\n self._n_records_missing = None\n self._n_records_special = None\n self._sum_special = None\n self._sum_missing = None\n self._std_special = None\n self._std_missing = None\n self._min_target_missing = None\n self._min_target_special = None\n self._max_target_missing = None\n self._max_target_special = None\n self._n_zeros_missing = None\n self._n_zeros_special = None\n\n def fit_transform(self, x, y, metric_special=0, metric_missing=0,\n lb=None, ub=None, check_input=False):\n \"\"\"Fit the optimal piecewise binning according to the given training\n data, then transform it.\n\n Parameters\n ----------\n x : array-like, shape = (n_samples,)\n Training vector, where n_samples is the number of samples.\n\n y : array-like, shape = (n_samples,)\n Target vector relative to x.\n\n metric_special : float or str (default=0)\n The metric value to transform special codes in the input vector.\n Supported metrics are \"empirical\" to use the empirical mean and any\n numerical value.\n\n metric_missing : float or str (default=0)\n The metric value to transform missing values in the input vector.\n Supported metrics are \"empirical\" to use the empirical mean and any\n numerical value.\n\n lb : float or None (default=None)\n Avoid values below the lower bound lb.\n\n ub : float or None (default=None)\n Avoid values above the upper bound ub.\n\n check_input : bool (default=False)\n Whether to check input arrays.\n\n Returns\n -------\n x_new : numpy array, shape = (n_samples,)\n Transformed array.\n \"\"\"\n return self.fit(x, y, check_input).transform(\n x, metric_special, metric_missing, lb, ub, check_input)\n\n def transform(self, x, metric_special=0, metric_missing=0,\n lb=None, ub=None, check_input=False):\n \"\"\"Transform given data using bins from the fitted optimal piecewise\n binning.\n\n Parameters\n ----------\n x : array-like, shape = (n_samples,)\n Training vector, where n_samples is the number of samples.\n\n metric_special : float or str (default=0)\n The metric value to transform special codes in the input vector.\n Supported metrics are \"empirical\" to use the empirical mean and any\n numerical value.\n\n metric_missing : float or str (default=0)\n The metric value to 
transform missing values in the input vector.\n Supported metrics are \"empirical\" to use the empirical mean and any\n numerical value.\n\n lb : float or None (default=None)\n Avoid values below the lower bound lb.\n\n ub : float or None (default=None)\n Avoid values above the upper bound ub.\n\n check_input : bool (default=False)\n Whether to check input arrays.\n\n Returns\n -------\n x_new : numpy array, shape = (n_samples,)\n Transformed array.\n \"\"\"\n self._check_is_fitted()\n\n return transform_continuous_target(\n self._optb.splits, x, self._c, lb, ub, self._n_records_special,\n self._sum_special, self._n_records_missing, self._sum_missing,\n self.special_codes, metric_special, metric_missing, check_input)\n\n def _fit(self, x, y, lb, ub, check_input):\n time_init = time.perf_counter()\n\n if self.verbose:\n self._logger.info(\"Optimal piecewise binning started.\")\n self._logger.info(\"Options: check parameters.\")\n\n _check_parameters(**self.get_params(deep=False), estimator=None,\n problem_type=self._problem_type)\n\n # Pre-processing\n if self.verbose:\n self._logger.info(\"Pre-processing started.\")\n\n self._n_samples = len(x)\n\n if self.verbose:\n self._logger.info(\"Pre-processing: number of samples: {}\"\n .format(self._n_samples))\n\n time_preprocessing = time.perf_counter()\n\n [x_clean, y_clean, x_missing, y_missing, x_special, y_special,\n _, _, _, _, _, _, _] = self._fit_preprocessing(x, y, check_input)\n\n self._time_preprocessing = time.perf_counter() - time_preprocessing\n\n if self.verbose:\n n_clean = len(x_clean)\n n_missing = len(x_missing)\n n_special = len(x_special)\n\n self._logger.info(\"Pre-processing: number of clean samples: {}\"\n .format(n_clean))\n\n self._logger.info(\"Pre-processing: number of missing samples: {}\"\n .format(n_missing))\n\n self._logger.info(\"Pre-processing: number of special samples: {}\"\n .format(n_special))\n\n if self.outlier_detector is not None:\n n_outlier = self._n_samples-(n_clean + n_missing + n_special)\n self._logger.info(\"Pre-processing: number of outlier samples: \"\n \"{}\".format(n_outlier))\n\n self._logger.info(\"Pre-processing terminated. Time: {:.4f}s\"\n .format(self._time_preprocessing))\n\n # Pre-binning\n self._time_estimator = 0\n\n # Fit optimal binning algorithm for continuous target. 
Use optimal\n # split points to compute optimal piecewise functions\n self._fit_binning(x_clean, y_clean, y_clean, lb, ub)\n\n # Post-processing\n if self.verbose:\n self._logger.info(\"Post-processing started.\")\n self._logger.info(\"Post-processing: compute binning information.\")\n\n time_postprocessing = time.perf_counter()\n\n # Compute n_records and sum for special and missing\n self._n_records_special = len(y_special)\n self._sum_special = np.sum(y_special)\n self._n_zeros_special = np.count_nonzero(y_special == 0)\n if len(y_special):\n self._std_special = np.std(y_special)\n self._min_target_special = np.min(y_special)\n self._max_target_special = np.max(y_special)\n\n self._n_records_missing = len(y_missing)\n self._sum_missing = np.sum(y_missing)\n self._n_zeros_missing = np.count_nonzero(y_missing == 0)\n if len(y_missing):\n self._std_missing = np.std(y_missing)\n self._min_target_missing = np.min(y_missing)\n self._max_target_missing = np.max(y_missing)\n\n bt = self._optb.binning_table.build(add_totals=False)\n n_records = bt[\"Count\"].values\n sums = bt[\"Sum\"].values\n stds = bt[\"Std\"].values\n min_target = bt[\"Min\"].values\n max_target = bt[\"Max\"].values\n n_zeros = bt[\"Zeros count\"].values\n\n n_records[self._n_bins] = self._n_records_special\n n_records[self._n_bins + 1] = self._n_records_missing\n sums[self._n_bins] = self._sum_special\n sums[self._n_bins + 1] = self._sum_missing\n stds[self._n_bins] = self._std_special\n stds[self._n_bins + 1] = self._std_missing\n min_target[self._n_bins] = self._min_target_special\n min_target[self._n_bins + 1] = self._min_target_missing\n max_target[self._n_bins] = self._max_target_special\n max_target[self._n_bins + 1] = self._max_target_missing\n n_zeros[self._n_bins] = self._n_zeros_special\n n_zeros[self._n_bins + 1] = self._n_zeros_missing\n\n # Compute metrics\n if self.verbose:\n self._logger.info(\"Post-processing: compute performance metrics.\")\n\n d_metrics = continuous_metrics(\n x_clean, y_clean, self._optb.splits, self._c, lb, ub,\n self._n_records_special, self._sum_special,\n self._n_records_missing, self._sum_missing, self.special_codes)\n\n # Binning table\n self._binning_table = PWContinuousBinningTable(\n self.name, self._optb.splits, self._c, n_records, sums, stds,\n min_target, max_target, n_zeros, lb, ub, x_clean.min(),\n x_clean.max(), d_metrics)\n\n self._time_postprocessing = time.perf_counter() - time_postprocessing\n\n if self.verbose:\n self._logger.info(\"Post-processing terminated. Time: {:.4f}s\"\n .format(self._time_postprocessing))\n\n self._time_total = time.perf_counter() - time_init\n\n if self.verbose:\n self._logger.info(\"Optimal piecewise binning terminated. \"\n \"Status: {}. Time: {:.4f}s\"\n .format(self._status, self._time_total))\n\n # Completed successfully\n self._class_logger.close()\n self._is_fitted = True\n\n return self\n" ]
[ [ "numpy.min", "numpy.max", "numpy.std", "numpy.count_nonzero", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mmwebster/DeepRL-Grounding
[ "aa7fa63fbc26e8b0fa3fe289a5fe5a00ef3e6278" ]
[ "a3c_train.py" ]
[ "import torch.optim as optim\nimport env as grounding_env\n\nfrom models import *\nfrom torch.autograd import Variable\n\nimport logging\n\n\ndef ensure_shared_grads(model, shared_model):\n for param, shared_param in zip(model.parameters(),\n shared_model.parameters()):\n if shared_param.grad is not None:\n return\n shared_param._grad = param.grad\n\n\ndef train(rank, args, shared_model):\n torch.manual_seed(args.seed + rank)\n\n env = grounding_env.GroundingEnv(args)\n env.game_init()\n\n model = A3C_LSTM_GA(args)\n\n if (args.load != \"0\"):\n print(str(rank) + \" Loading model ... \"+args.load)\n model.load_state_dict(\n torch.load(args.load, map_location=lambda storage, loc: storage))\n\n model.train()\n\n optimizer = optim.SGD(shared_model.parameters(), lr=args.lr)\n\n p_losses = []\n v_losses = []\n\n (image, instruction), _, _, _ = env.reset()\n instruction_idx = []\n for word in instruction.split(\" \"):\n instruction_idx.append(env.word_to_idx[word])\n instruction_idx = np.array(instruction_idx)\n\n image = torch.from_numpy(image).float()/255.0\n instruction_idx = torch.from_numpy(instruction_idx).view(1, -1)\n\n done = True\n\n episode_length = 0\n num_iters = 0\n while True:\n # Sync with the shared model\n model.load_state_dict(shared_model.state_dict())\n if done:\n episode_length = 0\n cx = Variable(torch.zeros(1, 256))\n hx = Variable(torch.zeros(1, 256))\n\n else:\n cx = Variable(cx.data)\n hx = Variable(hx.data)\n\n values = []\n log_probs = []\n rewards = []\n entropies = []\n\n for step in range(args.num_steps):\n episode_length += 1\n tx = Variable(torch.from_numpy(np.array([episode_length])).long())\n\n value, logit, (hx, cx) = model((Variable(image.unsqueeze(0)),\n Variable(instruction_idx),\n (tx, hx, cx)))\n prob = F.softmax(logit)\n log_prob = F.log_softmax(logit)\n entropy = -(log_prob * prob).sum(1)\n entropies.append(entropy)\n\n action = prob.multinomial(num_samples=1).data\n log_prob = log_prob.gather(1, Variable(action))\n\n action = action.numpy()[0, 0]\n (image, _), reward, done, _ = env.step(action)\n\n done = done or episode_length >= args.max_episode_length\n\n if done:\n (image, instruction), _, _, _ = env.reset()\n instruction_idx = []\n for word in instruction.split(\" \"):\n instruction_idx.append(env.word_to_idx[word])\n instruction_idx = np.array(instruction_idx)\n instruction_idx = torch.from_numpy(\n instruction_idx).view(1, -1)\n\n image = torch.from_numpy(image).float()/255.0\n\n values.append(value)\n log_probs.append(log_prob)\n rewards.append(reward)\n\n if done:\n break\n\n R = torch.zeros(1, 1)\n if not done:\n tx = Variable(torch.from_numpy(np.array([episode_length])).long())\n value, _, _ = model((Variable(image.unsqueeze(0)),\n Variable(instruction_idx), (tx, hx, cx)))\n R = value.data\n\n values.append(Variable(R))\n policy_loss = 0\n value_loss = 0\n R = Variable(R)\n\n gae = torch.zeros(1, 1)\n for i in reversed(range(len(rewards))):\n R = args.gamma * R + rewards[i]\n advantage = R - values[i]\n value_loss = value_loss + 0.5 * advantage.pow(2)\n\n # Generalized Advantage Estimataion\n delta_t = rewards[i] + args.gamma * \\\n values[i + 1].data - values[i].data\n gae = gae * args.gamma * args.tau + delta_t\n\n policy_loss = policy_loss - \\\n log_probs[i] * Variable(gae) - 0.01 * entropies[i]\n\n optimizer.zero_grad()\n\n p_losses.append(policy_loss.data[0, 0])\n v_losses.append(value_loss.data[0, 0])\n\n if(len(p_losses) > 1000):\n num_iters += 1\n print(\" \".join([\n \"Training thread: {}\".format(rank),\n \"Num iters: 
{}K\".format(num_iters),\n \"Avg policy loss: {}\".format(np.mean(p_losses)),\n \"Avg value loss: {}\".format(np.mean(v_losses))]))\n logging.info(\" \".join([\n \"Training thread: {}\".format(rank),\n \"Num iters: {}K\".format(num_iters),\n \"Avg policy loss: {}\".format(np.mean(p_losses)),\n \"Avg value loss: {}\".format(np.mean(v_losses))]))\n p_losses = []\n v_losses = []\n\n (policy_loss + 0.5 * value_loss).backward()\n torch.nn.utils.clip_grad_norm(model.parameters(), 40)\n\n ensure_shared_grads(model, shared_model)\n optimizer.step()\n" ]
[ [ "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mwojcik96/dtw-utterance-recognition
[ "9371393dfe92abb5b85c40828d099ceca599aa89" ]
[ "main.py" ]
[ "import glob\nimport struct\nimport wave\nfrom collections import Counter\nfrom operator import itemgetter\n\nimport librosa\nimport numpy as np\nfrom tslearn.metrics import dtw\n\n\ndef compute_mfcc_from_file(file):\n time_characteristic = create_time_characteristics_of_a_file(file)\n mfcc = librosa.feature.mfcc(y=time_characteristic, sr=16000, n_mfcc=13)\n return mfcc\n\n\ndef create_time_characteristics_of_a_file(file):\n wave_file = wave.open(file, 'r')\n # rate = wave_file.getframerate()\n length = wave_file.getnframes()\n time_plot = []\n for i in range(0, length):\n wave_data = wave_file.readframes(1)\n data = struct.unpack(\"<h\", wave_data)\n time_plot.append(int(data[0]))\n return np.array(time_plot, dtype=np.float32)\n\n\ndef compute_spectral_roloff(file):\n chars = create_time_characteristics_of_a_file(file)\n return librosa.feature.spectral_rolloff(chars, sr=16000)[0]\n\n\ndef calculate_dict(mfcc_values, rolloff_values, names, labels):\n final_dict = dict()\n for i in names:\n final_dict[i] = []\n for id1, (mf1, ro1, nm1, lb1) in enumerate(zip(mfcc_values, rolloff_values, names, labels)):\n for id2, (mf2, ro2, nm2, lb2) in enumerate(zip(mfcc_values, rolloff_values, names, labels)):\n if id1 < id2:\n current_dtw = dtw(mf1, mf2)\n # current_dtw = dtw(mf1 + ro1, mf2 + ro2)\n final_dict[nm1].append({\"name\": nm2, \"label\": lb2, \"distance\": current_dtw})\n final_dict[nm2].append({\"name\": nm1, \"label\": lb1, \"distance\": current_dtw})\n for final_key, final_item in final_dict.items():\n final_dict[final_key] = sorted(final_item, key=itemgetter('distance'))\n # print(key, len(final_dict[key]))\n return final_dict\n\n\ndef recognize_speech(vector, k=1):\n nearest_neighbours = Counter(elem[\"label\"] for elem in vector[:k])\n return nearest_neighbours.most_common(1)[0][0]\n\n\nif __name__ == '__main__':\n mfcc_list = []\n rolloff_list = []\n name_list = []\n label_list = []\n for wav_name in glob.glob(\"./*/*.WAV\"):\n mfcc_list.append(compute_mfcc_from_file(wav_name).T)\n rolloff_list.append(compute_spectral_roloff(wav_name))\n name_list.append(wav_name.split(\"/\")[-1])\n label_list.append(wav_name.split(\"/\")[-2])\n dist_dict = calculate_dict(mfcc_list, rolloff_list, name_list, label_list)\n for n in range(1, 11):\n accuracy = 0\n print(\"KNN for k =\", n)\n for key, item in dist_dict.items():\n real = label_list[name_list.index(key)]\n predicted = recognize_speech(item, n)\n # print(key, \"Real:\", real, \"Predicted:\", predicted)\n if real == predicted:\n accuracy += 1\n print(\"Accuracy:\", accuracy / len(name_list))\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mas-dse-ringhilt/DSE-American-Gut-Project
[ "dadb3be8d40d6fb325d26920b145c04c837a6869" ]
[ "american_gut_project_pipeline/pipeline/metrics.py" ]
[ "import pandas as pd\nfrom sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\n\n\ndef evaluate(clf, x_train, x_test, y_train, y_test, name, training_data_name, embedding, params=None):\n predictions = clf.predict(x_train)\n # train_tn, train_fp, train_fn, train_tp = confusion_matrix(y_train, predictions).ravel()\n train_accuracy = accuracy_score(y_train, predictions)\n # train_precision = precision_score(y_train, predictions)\n # train_recall = recall_score(y_train, predictions)\n train_f1_score = f1_score(y_train, predictions, average='weighted')\n\n predictions = clf.predict(x_test)\n # test_tn, test_fp, test_fn, test_tp = confusion_matrix(y_test, predictions).ravel()\n test_accuracy = accuracy_score(y_test, predictions)\n # test_precision = precision_score(y_test, predictions)\n # test_recall = recall_score(y_test, predictions)\n test_f1_score = f1_score(y_test, predictions, average='weighted')\n\n result_dict = {\n 'name': [name],\n 'embedding': [embedding],\n 'params': [params],\n 'training_data_name': [training_data_name],\n # 'train_true_negative': [train_tn],\n # 'train_false_positive': [train_fp],\n # 'train_false_negative': [train_fn],\n # 'train_true_positive': [train_tp],\n 'train_accuracy': [train_accuracy],\n # 'train_precision': [train_precision],\n # 'train_recall': [train_recall],\n 'train_f1_score': [train_f1_score],\n\n # 'test_true_negative': [test_tn],\n # 'test_false_positive': [test_fp],\n # 'test_false_negative': [test_fn],\n # 'test_true_positive': [test_tp],\n 'test_accuracy': [test_accuracy],\n # 'test_precision': [test_precision],\n # 'test_recall': [test_recall],\n 'test_f1_score': [test_f1_score],\n }\n\n return pd.DataFrame(result_dict)\n" ]
[ [ "sklearn.metrics.f1_score", "pandas.DataFrame", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
grandevelia/ProDy
[ "7c725640a94c16543423c0756388998cb86a97ae", "7c725640a94c16543423c0756388998cb86a97ae" ]
[ "prody/chromatin/hic.py", "prody/atomic/improper.py" ]
[ "from numbers import Integral\n\nfrom numpy import ma\nimport numpy as np\nfrom scipy.sparse import coo_matrix\nfrom scipy.stats import mode\nfrom prody.chromatin.norm import VCnorm, SQRTVCnorm, Filenorm\nfrom prody.chromatin.functions import div0, showDomains, _getEigvecs\n\nfrom prody import PY2K\nfrom prody.dynamics import GNM, MaskedGNM\nfrom prody.dynamics.functions import writeArray\nfrom prody.dynamics.mode import Mode\nfrom prody.dynamics.modeset import ModeSet\n\nfrom prody.utilities import openFile, importLA, showMatrix, isURL, fixArraySize, makeSymmetric\n\n__all__ = ['HiC', 'parseHiC', 'parseHiCStream', 'parseHiCBinary', 'saveHiC', 'loadHiC', 'writeMap']\n\nclass HiC(object):\n\n \"\"\"This class is used to store and preprocess Hi-C contact map. A :class:`.GNM`\n instance for analyzing the contact map can be also created by using this class.\n \"\"\"\n\n def __init__(self, title='Unknown', map=None, bin=None):\n self._title = title\n self._map = None\n self.mask = False\n self._labels = 0\n self.masked = True\n self.bin = bin\n self.map = map\n \n @property\n def map(self):\n if self.masked:\n return self.getTrimedMap()\n else:\n return self._map\n\n @map.setter\n def map(self, value):\n if value is None: \n self._map = None\n else:\n self._map = np.asarray(value)\n self._map = makeSymmetric(self._map)\n self._maskUnmappedRegions()\n self._labels = np.zeros(len(self._map), dtype=int)\n\n def __repr__(self):\n mask = self.mask\n \n if np.isscalar(mask):\n return '<HiC: {0} ({1} loci)>'.format(self._title, len(self._map))\n else:\n return '<HiC: {0} ({1} mapped loci; {2} in total)>'.format(self._title, np.count_nonzero(mask), len(self._map))\n\n def __str__(self):\n\n return 'HiC ' + self._title\n\n def __getitem__(self, index):\n if isinstance(index, Integral):\n return self.map.flatten()[index]\n else:\n i, j = index\n return self.map[i,j]\n\n def __len__(self):\n mask = self.mask \n \n if np.isscalar(mask):\n return len(self._map)\n else:\n return np.count_nonzero(mask)\n \n def numAtoms(self):\n return len(self.map)\n\n def getTitle(self):\n \"\"\"Returns title of the instance.\"\"\"\n\n return self._title\n\n def setTitle(self, title):\n \"\"\"Sets title of the instance.\"\"\"\n\n self._title = str(title)\n\n def getCompleteMap(self):\n \"\"\"Obtains the complete contact map with unmapped regions.\"\"\"\n\n return self._map\n \n def getTrimedMap(self):\n \"\"\"Obtains the contact map without unmapped regions.\"\"\"\n\n if self._map is None: \n return None\n if np.isscalar(self.mask):\n return self._map\n\n M = ma.array(self._map)\n M.mask = np.diag(~self.mask)\n return ma.compress_rowcols(M)\n \n def align(self, array, axis=None):\n if not isinstance(array, np.ndarray):\n array = np.array(array)\n\n ret = array = array.copy()\n\n if np.isscalar(self.mask):\n return ret\n\n mask = self.mask.copy()\n\n l_full = self.getCompleteMap().shape[0]\n l_trim = self.getTrimedMap().shape[0]\n \n if len(array.shape) == 0:\n raise ValueError('array cannot be empty')\n elif len(array.shape) == 1:\n l = array.shape[0]\n if l == l_trim:\n N = len(mask)\n ret = np.zeros(N, dtype=array.dtype)\n ret[mask] = array\n elif l == l_full:\n ret = array[mask]\n else:\n raise ValueError('The length of array (%d) does not '\n 'match that of either the full (%d) '\n 'or trimed (%d).'\n %(l, l_full, l_trim))\n elif len(array.shape) == 2:\n s = array.shape\n\n if axis is None:\n if s[0] != s[1]:\n raise ValueError('The array must be a square matrix '\n 'if axis is set to None.')\n if s[0] == l_trim:\n N 
= len(mask)\n whole_mat = np.zeros((N,N), dtype=array.dtype)\n mask = np.outer(mask, mask)\n whole_mat[mask] = array.flatten()\n ret = whole_mat\n elif s[0] == l_full:\n M = ma.array(array)\n M.mask = np.diag(mask)\n ret = ma.compress_rowcols(M)\n else:\n raise ValueError('The size of array (%d) does not '\n 'match that of either the full (%d) '\n 'or trimed (%d).'\n %(s[0], l_full, l_trim))\n else:\n new_shape = list(s)\n otheraxis = 0 if axis!=0 else 1\n if s[axis] == l_trim:\n N = len(mask)\n new_shape[axis] = N\n whole_mat = np.zeros(new_shape)\n mask = np.expand_dims(mask, axis=otheraxis)\n mask = mask.repeat(s[otheraxis], axis=otheraxis)\n whole_mat[mask] = array.flatten()\n ret = whole_mat\n elif s[axis] == l_full:\n mask = np.expand_dims(mask, axis=otheraxis)\n mask = mask.repeat(s[otheraxis])\n ret = self._map[mask]\n else:\n raise ValueError('The size of array (%d) does not '\n 'match that of either the full (%d) '\n 'or trimed (%d).'\n %(s[0], l_full, l_trim))\n \n return ret\n\n def getKirchhoff(self):\n \"\"\"Builds a Kirchhoff matrix based on the contact map.\"\"\"\n\n if self._map is None:\n return None\n else:\n M = self.map\n \n I = np.eye(M.shape[0], dtype=bool)\n A = M.copy()\n A[I] = 0.\n D = np.diag(np.sum(A, axis=0))\n K = D - A\n return K\n\n def _maskUnmappedRegions(self, diag=False):\n \"\"\"Finds and masks unmapped regions in the contact map.\"\"\"\n\n M = self._map\n if M is None: return\n\n if diag:\n # Obtain the diagonal values, need to make sure d is an array \n # instead of a matrix, otherwise diag() later will not work as \n # intended.\n d = np.array(np.diag(M))\n else:\n d = np.array(M.sum(0))\n\n # mask if a diagonal value is zero\n mask_zero = np.array(d==0)\n # mask if a diagonal value is NAN\n mask_nan = np.isnan(d)\n # combine two masks\n mask = np.logical_or(mask_nan, mask_zero)\n self.mask = ~mask\n\n return self.mask\n \n def calcGNM(self, n_modes=None, **kwargs):\n \"\"\"Calculates GNM on the current Hi-C map. By default, ``n_modes`` is \n set to **None** and ``zeros`` to **True**.\"\"\"\n\n if 'zeros' not in kwargs:\n kwargs['zeros'] = True\n \n if self.masked:\n gnm = MaskedGNM(self._title, self.mask)\n else:\n gnm = GNM(self._title)\n gnm.setKirchhoff(self.getKirchhoff())\n gnm.calcModes(n_modes=n_modes, **kwargs)\n return gnm\n \n def normalize(self, method=VCnorm, **kwargs):\n \"\"\"Applies chosen normalization on the current Hi-C map.\"\"\"\n\n M = self._map\n N = method(M, **kwargs)\n self.map = N\n return N\n \n def setDomains(self, labels, **kwargs):\n \"\"\"Uses spectral clustering to identify structural domains on the chromosome.\n \n :arg labels: domain labels\n :type labels: :class:`~numpy.ndarray`, list\n\n :arg method: Label assignment algorithm used after Laplacian embedding.\n :type method: func\n \"\"\"\n wastrimmed = self.masked\n\n self.masked = True\n if len(labels) == self.numAtoms():\n full_length = self.numAtoms()\n if full_length != len(labels):\n _labels = np.empty(full_length)\n _labels.fill(np.nan)\n _labels[self.mask] = labels\n\n currlbl = labels[0]\n\n for i in range(len(_labels)):\n l = _labels[i]\n if np.isnan(l):\n _labels[i] = currlbl\n elif currlbl != l:\n currlbl = l\n labels = _labels\n else:\n self.masked = False\n if len(labels) != self.numAtoms():\n raise ValueError('The length of the labels should match either the length '\n 'of masked or complete Hi-C map. 
Turn off \"masked\" if '\n 'you intended to set the labels to the full map.')\n \n self.masked = wastrimmed\n self._labels = labels\n return self.getDomains()\n \n def getDomains(self):\n \"\"\"Returns an 1D :class:`numpy.ndarray` whose length is the number of loci. Each \n element is an index denotes to which domain the locus belongs.\"\"\"\n\n lbl = self._labels\n mask = self.mask\n if self.masked:\n lbl = lbl[mask]\n return lbl\n\n def getDomainList(self):\n \"\"\"Returns a list of domain separations. The list has two columns: the first is for \n the domain starts and the second is for the domain ends.\"\"\"\n\n indicators = np.diff(self.getDomains())\n indicators = np.append(1., indicators)\n indicators[-1] = 1\n sites = np.where(indicators != 0)[0]\n starts = sites[:-1]\n ends = sites[1:]\n domains = np.array([starts, ends]).T\n\n return domains\n\n def view(self, spec='p', **kwargs):\n \"\"\"Visualization of the Hi-C map and domains (if present). The function makes use \n of :func:`.showMatrix`.\n \n :arg spec: a string specifies how to preprocess the matrix. Blank for no preprocessing,\n 'p' for showing only data from *p*-th to *100-p*-th percentile. '_' is to suppress \n creating a new figure and paint to the current one instead. The letter specifications \n can be applied sequentially, e.g. 'p_'.\n :type spec: str\n\n :arg p: specifies the percentile threshold.\n :type p: double\n \"\"\"\n\n dm_kwargs = {}\n keys = list(kwargs.keys())\n for k in keys:\n if k.startswith('dm_'):\n dm_kwargs[k[3:]] = kwargs.pop(k)\n elif k.startswith('domain_'):\n dm_kwargs[k[7:]] = kwargs.pop(k)\n\n M = self.map\n if 'p' in spec:\n p = kwargs.pop('p', 5)\n lp = kwargs.pop('lp', p)\n hp = kwargs.pop('hp', 100-p)\n vmin = np.percentile(M, lp)\n vmax = np.percentile(M, hp)\n else:\n vmin = vmax = None\n\n if not 'vmin' in kwargs:\n kwargs['vmin'] = vmin\n if not 'vmax' in kwargs:\n kwargs['vmax'] = vmax\n\n im = showMatrix(M, **kwargs)\n\n domains = self.getDomainList()\n if len(domains) > 1:\n showDomains(domains, **dm_kwargs)\n\n return im\n\n def copy(self):\n new = type(self)()\n new.__dict__.update(self.__dict__)\n return new\n \n __copy__ = copy\n\n\ndef parseHiC(filename, **kwargs):\n \"\"\"Returns an :class:`.HiC` from a Hi-C data file.\n\n This function extends :func:`.parseHiCStream`.\n\n :arg filename: the filename to the Hi-C data file.\n :type filename: str\n \"\"\"\n\n import os, struct\n title = kwargs.get('title')\n if title is None:\n title = os.path.basename(filename)\n else:\n title = kwargs.pop('title')\n\n if isURL(filename):\n M, res = parseHiCBinary(filename, title=title, **kwargs)\n else:\n with open(filename,'rb') as req:\n magic_number = struct.unpack('<3s',req.read(3))[0]\n if magic_number == b\"HIC\":\n M, res = parseHiCBinary(filename, title=title, **kwargs)\n else:\n with open(filename, 'r') as filestream:\n M, res = parseHiCStream(filestream, title=title, **kwargs)\n \n hic = HiC(title=title, map=M, bin=res)\n\n return hic\n\ndef _sparse2dense(I, J, values, bin=None):\n I = np.asarray(I, dtype=int)\n J = np.asarray(J, dtype=int)\n values = np.asarray(values, dtype=float)\n # determine the bin size by the most frequent interval\n if bin is None:\n loci = np.unique(np.sort(I))\n bins = np.diff(loci)\n bin = mode(bins)[0][0]\n # convert coordinate from basepair to locus index\n bin = int(bin)\n I = I // bin\n J = J // bin\n # make sure that the matrix is square\n # if np.max(I) != np.max(J):\n # b = np.max(np.append(I, J))\n # I = np.append(I, b)\n # J = np.append(J, b)\n # 
values = np.append(values, 0.)\n # Convert to sparse matrix format, then full matrix format\n # and finally array type. Matrix format is avoided because\n # diag() won't work as intended for Matrix instances.\n M = np.array(coo_matrix((values, (I, J))).todense())\n return M, bin\n\ndef parseHiCStream(stream, **kwargs):\n \"\"\"Returns an :class:`.HiC` from a stream of Hi-C data lines.\n\n :arg stream: Anything that implements the method ``read``, ``seek``\n (e.g. :class:`file`, buffer, stdin)\n \"\"\"\n\n issparse = kwargs.get('sparse', None)\n\n import csv\n dialect = csv.Sniffer().sniff(stream.read(1024))\n stream.seek(0)\n reader = csv.reader(stream, dialect)\n D = list()\n for row in reader:\n d = list()\n for element in row:\n d.append(np.double(element))\n D.append(d)\n D = np.array(D)\n\n res = kwargs.get('bin', None)\n if res is not None:\n res = int(res)\n size = D.shape\n if len(D.shape) <= 1:\n raise ValueError(\"cannot parse the file: input file only contains one column.\")\n \n if issparse is None:\n issparse = size[1] == 3\n\n if not issparse:\n M = D\n else:\n try:\n I, J, values = D.T[:3]\n except ValueError:\n raise ValueError('the sparse matrix format should have three columns')\n \n M, res = _sparse2dense(I, J, values, bin=res)\n return M, res\n\ndef parseHiCBinary(filename, **kwargs):\n\n chrloc = kwargs.get('chrom', None)\n if chrloc is None:\n raise ValueError('chrom needs to be specified when parsing .hic format')\n chrloc1 = kwargs.get('chrom1', chrloc)\n chrloc2 = kwargs.get('chrom2', chrloc)\n norm = kwargs.get('norm', 'NONE')\n unit = kwargs.get('unit', 'BP')\n res = kwargs.get('binsize', None)\n res = kwargs.get('bin', res)\n if res is None:\n raise ValueError('bin needs to be specified when parsing .hic format')\n res = int(res)\n\n from .straw import straw\n result = straw(norm, filename, chrloc1, chrloc2, unit, res)\n\n M, res = _sparse2dense(*result, bin=res)\n return M, res\n\ndef writeMap(filename, map, bin=None, format='%f'):\n \"\"\"Writes *map* to the file designated by *filename*.\n\n :arg filename: the file to be written.\n :type filename: str\n\n :arg map: a Hi-C contact map.\n :type map: :class:`numpy.ndarray`\n\n :arg bin: bin size of the *map*. If bin is `None`, *map* will be \n written in full matrix format.\n :type bin: int\n\n :arg format: output format for map elements.\n :type format: str\n \"\"\"\n\n assert isinstance(map, np.ndarray), 'map must be a numpy.ndarray.'\n\n if bin is None:\n return writeArray(filename, map, format=format)\n else:\n L = int(map.size - np.diag(map).size)//2 + np.diag(map).size\n spmat = np.zeros((L, 3))\n m,n = map.shape\n l = 0\n for i in range(m):\n for j in range(i,n):\n spmat[l, 0] = i * bin\n spmat[l, 1] = j * bin\n spmat[l, 2] = map[i, j]\n l += 1\n fmt = ['%d', '%d', format]\n return writeArray(filename, spmat, format=fmt)\n\ndef saveHiC(hic, filename=None, map=True, **kwargs):\n \"\"\"Saves *HiC* model data as :file:`filename.hic.npz`. If *map* is **True**, \n Hi-C contact map will not be saved and it can be loaded from raw data file \n later. If *filename* is **None**, name of the Hi-C instance will be used as \n the filename, after ``\" \"`` (white spaces) in the name are replaced with \n ``\"_\"`` (underscores). Upon successful completion of saving, filename is \n returned. 
This function makes use of :func:`numpy.savez` function.\"\"\"\n\n assert isinstance(hic, HiC), 'hic must be a HiC instance.'\n \n if filename is None:\n filename = hic.getTitle().replace(' ', '_')\n \n if filename.endswith('.hic'):\n filename += '.npz'\n elif not filename.endswith('.hic.npz'):\n filename += '.hic.npz'\n\n attr_dict = hic.__dict__.copy()\n if not map:\n attr_dict.pop('_map')\n\n ostream = openFile(filename, 'wb', **kwargs)\n np.savez_compressed(ostream, **attr_dict)\n ostream.close()\n\n return filename\n\ndef loadHiC(filename):\n \"\"\"Returns HiC instance after loading it from file (*filename*).\n This function makes use of :func:`numpy.load` function. See also \n :func:`saveHiC`.\"\"\"\n \n attr_dict = np.load(filename)\n hic = HiC()\n\n keys = attr_dict.keys()\n\n for k in keys:\n val = attr_dict[k]\n if len(val.shape) == 0:\n val = np.asscalar(val)\n setattr(hic, k, val)\n return hic\n\ndef saveHiC_h5(hic, filename=None, **kwargs):\n \"\"\"Saves *HiC* model data as :file:`filename.hic.npz`. If *filename* is \n **None**, name of the Hi-C instance will be used as \n the filename, after ``\" \"`` (white spaces) in the name are replaced with \n ``\"_\"`` (underscores). Upon successful completion of saving, filename is \n returned. This function makes use of :func:`numpy.savez` function.\"\"\"\n\n try:\n import h5py\n except:\n raise ImportError('h5py needs to be installed for using this function')\n\n assert isinstance(hic, HiC), 'hic must be a HiC instance.'\n \n if filename is None:\n filename = hic.getTitle().replace(' ', '_')\n \n if filename.endswith('.hic'):\n filename += '.hic'\n elif not filename.endswith('.hic.h5'):\n filename += '.hic.h5'\n\n attr_dict = hic.__dict__.copy()\n\n with h5py.File(filename, 'w') as f:\n for key in attr_dict:\n value = attr_dict[key]\n compression = None if np.isscalar(value) else 'gzip'\n f.create_dataset(key, data=value, compression=compression)\n\n return filename\n\ndef loadHiC_h5(filename):\n \"\"\"Returns HiC instance after loading it from file (*filename*).\n This function makes use of :func:`numpy.load` function. See also \n :func:`saveHiC`.\"\"\"\n \n try:\n import h5py\n except:\n raise ImportError('h5py needs to be installed for using this function')\n\n hic = HiC()\n with h5py.File(filename, 'r') as f:\n for key in f.keys():\n try:\n value = f[key][:]\n except:\n value = f[key][()]\n setattr(hic, key, value)\n\n return hic\n", "# -*- coding: utf-8 -*-\n\"\"\"This module defines :class:`Improper` for dealing with improper information provided\nby using :meth:`.AtomGroup.setImpropers` method.\"\"\"\n\nfrom numbers import Integral\nimport numpy as np\n\nRAD2DEG = 180 / np.pi\n\n__all__ = ['Improper']\n\nclass Improper(object):\n\n \"\"\"A pointer class for improperd atoms. Following built-in functions are\n customized for this class:\n\n * :func:`len` returns improper length, i.e. 
:meth:`getSize`\n * :func:`iter` yields :class:`~.Atom` instances\"\"\"\n\n __slots__ = ['_ag', '_acsi', '_indices']\n\n def __init__(self, ag, indices, acsi=None):\n\n self._ag = ag\n self._indices = np.array(indices)\n if acsi is None:\n self._acsi = ag.getACSIndex()\n else:\n self._acsi = acsi\n\n def __repr__(self):\n\n one, two, three, four = self._indices\n names = self._ag._getNames()\n return '<Improper: {0}({1})--{2}({3})--{4}({5}--{6}({7})) from {8}>'.format(\n names[one], one, names[two], two,\n names[three], three, names[four], four,\n str(self._ag))\n\n def __str__(self):\n\n one, two, three, four = self._indices\n names = self._ag._getNames()\n return '{0}({1})--{2}({3})--{4}({5})--{6}({7})'.format(\n names[one], one, names[two], two,\n names[three], three, names[four], four)\n\n def __eq__(self, other):\n\n return (isinstance(other, Improper) and other.getAtomGroup() is self._ag\n and (np.all(other.getIndices() == self._indices) or\n np.all(other.getIndices() == list(reversed(self._indices)))))\n\n def __ne__(self, other):\n\n return not self.__eq__(other)\n\n def __size__(self):\n\n return self.getSize()\n\n def __iter__(self):\n\n for index in self._indices:\n yield self._ag[index]\n\n def getAtomGroup(self):\n \"\"\"Returns atom group.\"\"\"\n\n return self._ag\n\n def getAtoms(self):\n \"\"\"Returns improperd atoms.\"\"\"\n\n return (self._ag[self._indices[0]], self._ag[self._indices[1]], self._ag[self._indices[2]])\n\n def getIndices(self):\n \"\"\"Returns indices of improperd atoms.\"\"\"\n\n return self._indices.copy()\n\n def getSize(self):\n \"\"\"Returns improper size.\"\"\"\n\n one, two, three, four = self._indices\n acsi = self.getACSIndex()\n atoms1 = self._ag._coords[acsi, one]\n atoms2 = self._ag._coords[acsi, two]\n atoms3 = self._ag._coords[acsi, three]\n atoms4 = self._ag._coords[acsi, four]\n return calcImproper(atoms1, atoms2, atoms3, atoms4)\n\n def getVectors(self):\n \"\"\"Returns bond vectors that originate from the central atom.\"\"\"\n\n one, two, three, four = self._indices\n acsi = self.getACSIndex()\n vector1 = self._ag._coords[acsi, one] - self._ag._coords[acsi, two]\n vector2 = self._ag._coords[acsi, two] - self._ag._coords[acsi, three]\n vector3 = self._ag._coords[acsi, three] - self._ag._coords[acsi, four]\n return vector1, vector2, vector3\n\n def getACSIndex(self):\n \"\"\"Returns index of the coordinate set.\"\"\"\n\n acsi = self._acsi\n if acsi >= self._ag._n_csets:\n raise ValueError('{0} has fewer coordsets than assumed by {1}'\n .format(str(self._ag), str(self)))\n return acsi\n\n def setACSIndex(self, index):\n \"\"\"Set the coordinate set at *index* active.\"\"\"\n\n if self._ag._coords is None:\n raise AttributeError('coordinates are not set')\n\n if not isinstance(index, Integral):\n raise TypeError('index must be an integer')\n\n n_csets = self._ag._n_csets\n if n_csets <= index or n_csets < abs(index):\n raise IndexError('coordinate set index is out of range')\n\n if index < 0:\n index += n_csets\n\n self._acsi = index\n\n\ndef evalImpropers(impropers, n_atoms):\n \"\"\"Returns an array mapping atoms to their improperd neighbors and an array\n that stores number of impropers made by each atom.\"\"\"\n\n numimpropers = np.bincount(impropers.reshape((impropers.shape[0] * 4)))\n imap = np.zeros((n_atoms, numimpropers.max(), 3), int)\n imap.fill(-1)\n index = np.zeros(n_atoms, int)\n for improper in impropers:\n a, b, c, d = improper\n imap[a, index[a]] = [b, c, d]\n imap[b, index[b]] = [a, c, d]\n imap[c, index[c]] = [a, b, d]\n 
imap[d, index[d]] = [a, b, c]\n index[improper] += 1\n return imap, numimpropers\n\n\ndef trimImpropers(impropers, indices):\n \"\"\"Returns impropers between atoms at given indices.\"\"\"\n\n iset = set(indices)\n impropers = [improper for improper in impropers if improper[0]\n in iset and improper[1] in iset and improper[2] in iset and improper[3] in iset]\n if impropers:\n newindices = np.zeros(indices.max()+1, int)\n newindices[indices] = np.arange(len(indices))\n return newindices[np.array(impropers)]\n" ]
[ [ "numpy.diag", "numpy.expand_dims", "numpy.asarray", "numpy.ma.array", "numpy.where", "numpy.double", "numpy.ma.compress_rowcols", "scipy.sparse.coo_matrix", "numpy.asscalar", "numpy.eye", "numpy.diff", "numpy.count_nonzero", "numpy.load", "numpy.outer", "numpy.zeros", "numpy.isnan", "numpy.logical_or", "numpy.append", "numpy.array", "numpy.sum", "numpy.percentile", "numpy.sort", "numpy.savez_compressed", "numpy.isscalar", "scipy.stats.mode", "numpy.empty" ], [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
minhhoang1023/GamestonkTerminal
[ "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704" ]
[ "gamestonk_terminal/common/quantitative_analysis/rolling_model.py", "gamestonk_terminal/portfolio/portfolio_view.py", "gamestonk_terminal/cryptocurrency/nft/opensea_model.py", "gamestonk_terminal/common/behavioural_analysis/twitter_model.py", "gamestonk_terminal/cryptocurrency/overview/rekt_model.py", "gamestonk_terminal/cryptocurrency/defi/terramoney_fcd_model.py", "gamestonk_terminal/forex/av_view.py", "gamestonk_terminal/menu.py", "tests/gamestonk_terminal/stocks/sector_industry_analysis/test_stockanalysis_model.py", "discordbot/stocks/options/unu.py" ]
[ "\"\"\"Rolling Statistics\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nfrom typing import Tuple\n\nimport pandas as pd\nimport pandas_ta as ta\n\nfrom gamestonk_terminal.decorators import log_start_end\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef get_rolling_avg(df: pd.DataFrame, length: int) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Return rolling mean and standard deviation\n\n Parameters\n ----------\n df_stock : pd.DataFrame\n Dataframe of target data\n length : int\n Length of rolling window\n\n Returns\n -------\n pd.DataFrame :\n Dataframe of rolling mean\n pd.DataFrame :\n Dataframe of rolling standard deviation\n \"\"\"\n rolling_mean = df.rolling(length, center=True, min_periods=1).mean()\n rolling_std = df.rolling(length, center=True, min_periods=1).std()\n\n return pd.DataFrame(rolling_mean), pd.DataFrame(rolling_std)\n\n\n@log_start_end(log=logger)\ndef get_spread(df: pd.DataFrame, length: int) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Standard Deviation and Variance\n\n Parameters\n ----------\n df_stock : pd.DataFrame\n DataFrame of targeted data\n\n Returns\n -------\n df_sd : pd.DataFrame\n Dataframe of rolling standard deviation\n df_var : pd.DataFrame\n Dataframe of rolling standard deviation\n \"\"\"\n df_sd = ta.stdev(\n close=df,\n length=length,\n ).dropna()\n df_var = ta.variance(\n close=df,\n length=length,\n ).dropna()\n\n return pd.DataFrame(df_sd), pd.DataFrame(df_var)\n\n\n@log_start_end(log=logger)\ndef get_quantile(\n df: pd.DataFrame, length: int, quantile_pct: float\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Overlay Median & Quantile\n\n Parameters\n ----------\n df : pd.DataFrame\n Dataframe of targeted data\n length : int\n Length of window\n quantile : float\n Quantile to display\n\n Returns\n -------\n df_med : pd.DataFrame\n Dataframe of median prices over window\n df_quantile : pd.DataFrame\n Dataframe of gievn quantile prices over window\n \"\"\"\n df_med = ta.median(close=df, length=length).dropna()\n df_quantile = ta.quantile(\n df,\n length=length,\n q=quantile_pct,\n ).dropna()\n\n return pd.DataFrame(df_med), pd.DataFrame(df_quantile)\n\n\n@log_start_end(log=logger)\ndef get_skew(df: pd.DataFrame, length: int) -> pd.DataFrame:\n \"\"\"Skewness Indicator\n\n Parameters\n ----------\n df_stock : pd.DataFrame\n Dataframe of targeted data\n length : int\n Length of window\n\n Returns\n -------\n df_skew : pd.DataFrame\n Dataframe of rolling skew\n \"\"\"\n df_skew = ta.skew(close=df, length=length).dropna()\n return df_skew\n\n\n@log_start_end(log=logger)\ndef get_kurtosis(df: pd.DataFrame, length: int) -> pd.DataFrame:\n \"\"\"Kurtosis Indicator\n\n Parameters\n ----------\n df_stock : pd.DataFrame\n Dataframe of targeted data\n length : int\n Length of window\n\n Returns\n -------\n df_kurt : pd.DataFrame\n Dataframe of rolling kurtosis\n \"\"\"\n df_kurt = ta.kurtosis(close=df, length=length).dropna()\n return df_kurt\n", "\"\"\"Portfolio View\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nfrom typing import List, Optional\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom gamestonk_terminal.config_terminal import theme\nfrom gamestonk_terminal.config_plot import PLOT_DPI\nfrom gamestonk_terminal.portfolio import (\n portfolio_model,\n)\n\nfrom gamestonk_terminal.helper_funcs import (\n plot_autoscale,\n export_data,\n)\nfrom gamestonk_terminal.decorators import log_start_end\nfrom gamestonk_terminal.rich_config import console\n\n# from 
reportlab.lib.pagesizes import letter\n# from reportlab.pdfgen import canvas\n# from reportlab.lib.utils import ImageReader\n# from gamestonk_terminal.portfolio import reportlab_helpers\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef load_info():\n \"\"\"Prints instructions to load a CSV\n\n Returns\n ----------\n text : str\n Information on how to load a csv\n \"\"\"\n text = \"\"\"\nIn order to load a CSV do the following:\n\n1. Add headers to the first row, below is data for each column:\\n\n\\t1. Identifier for the asset (such as a stock ticker)\n\\t2. Type of asset (stock, bond, option, crypto)\n\\t3. The volume of the asset transacted\n\\t4. The buy date in yyyy/mm/dd\n\\t5. The Price paid for the asset\n\\t6. Any fees paid during the transaction\n\\t7. A premium paid or received if this was an option\n\\t8. Whether the asset was bought (covered) or sold (shorted)\\n\n2. Place this file in gamestonk_terminal/portfolio/portfolios\\n\n \"\"\"\n console.print(text)\n\n\n@log_start_end(log=logger)\ndef display_returns_vs_bench(\n portfolio: portfolio_model.Portfolio,\n benchmark: str = \"SPY\",\n external_axes: Optional[plt.Axes] = None,\n):\n \"\"\"Display portfolio returns vs benchmark\n\n Parameters\n ----------\n portfolio: Portfolio\n Custom portfolio object with trade list\n benchmark: str\n Symbol for benchmark. Defaults to SPY\n external_axes: plt.Axes\n Optional axes to display plot on\n \"\"\"\n if external_axes is None:\n _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n else:\n ax = external_axes\n\n portfolio.generate_holdings_from_trades()\n portfolio.add_benchmark(benchmark)\n\n cumulative_returns = (1 + portfolio.returns).cumprod()\n benchmark_c_returns = (1 + portfolio.benchmark_returns).cumprod()\n\n ax.plot(cumulative_returns.index, cumulative_returns, label=\"Portfolio\")\n ax.plot(benchmark_c_returns.index, benchmark_c_returns, label=\"Benchmark\")\n ax.set_ylabel(\"Cumulative Returns\")\n ax.legend(loc=\"upper left\")\n theme.style_primary_axis(ax)\n if not external_axes:\n theme.visualize_output()\n\n\n@log_start_end(log=logger)\ndef display_allocation(\n portfolio: portfolio_model.Portfolio,\n export: str = \"\",\n external_axes: Optional[plt.Axes] = None,\n):\n \"\"\"Display allocation of assets vs time\n\n Parameters\n ----------\n portfolio: Portfolio\n Portfolio object with trades loaded\n export: str\n Format to export plot\n external_axes: plt.Axes\n Optional axes to display plot on\n \"\"\"\n portfolio.generate_holdings_from_trades()\n all_holdings = pd.concat(\n [\n portfolio.portfolio[\"StockHoldings\"],\n portfolio.portfolio[\"ETFHoldings\"],\n portfolio.portfolio[\"CryptoHoldings\"],\n ],\n axis=1,\n )\n all_holdings = all_holdings.drop(columns=[\"temp\"])\n\n if external_axes is None:\n _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n else:\n ax = external_axes\n all_holdings.plot(ax=ax)\n ax.set_title(\"Individual Asset Holdings\")\n ax.legend(loc=\"upper left\")\n ax.set_ylabel(\"Holdings ($)\")\n theme.style_primary_axis(ax)\n if external_axes is None:\n theme.visualize_output()\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"rolling\",\n )\n\n\n@log_start_end(log=logger)\ndef display_rolling_stats(\n portfolio: portfolio_model.Portfolio,\n length: int = 60,\n benchmark: str = \"SPY\",\n risk_free_rate: float = 0,\n external_axes: Optional[List[plt.Axes]] = None,\n export: str = \"\",\n):\n \"\"\"Display portfolio returns vs benchmark\n\n Parameters\n ----------\n 
portfolio: Portfolio\n Custom portfolio object with trade list\n length: int\n Length of rolling window\n benchmark: str\n Symbol for benchmark. Defaults to SPY\n risk_free_rate: float\n Value to use for risk free rate in sharpe/other calculations\n external_axes: Optional[List[plt.Axes]]\n Optional axes to display plot on\n export: str\n Export to file\n \"\"\"\n portfolio.generate_holdings_from_trades()\n portfolio.add_benchmark(benchmark)\n portfolio.add_rf(risk_free_rate)\n if external_axes is None:\n _, ax = plt.subplots(4, 1, figsize=(8, 8), dpi=PLOT_DPI, sharex=True)\n else:\n if len(external_axes) != 4:\n console.print(\"[red]4 axes expected./n[/red]\")\n return\n ax = external_axes\n rolling_volatility = portfolio.returns.rolling(length).std()\n rolling_volatility_bench = portfolio.benchmark_returns.rolling(length).std()\n\n rolling_sharpe = portfolio.returns.rolling(length).apply(\n lambda x: (x.mean() - risk_free_rate) / x.std()\n )\n rolling_sharpe_bench = portfolio.benchmark_returns.rolling(length).apply(\n lambda x: (x.mean() - risk_free_rate) / x.std()\n )\n\n rolling_volatility.plot(ax=ax[1])\n rolling_volatility_bench.plot(ax=ax[1])\n ax[1].set_title(\"Rolling Volatility\")\n\n rolling_sharpe.plot(ax=ax[2])\n rolling_sharpe_bench.plot(ax=ax[2])\n ax[2].set_title(\"Rolling Sharpe Ratio\")\n\n # Rolling beta is defined as Cov(Port,Bench)/var(Bench)\n covs = (\n pd.DataFrame(\n {\"Portfolio\": portfolio.returns, \"Benchmark\": portfolio.benchmark_returns}\n )\n .dropna(axis=0)\n .rolling(length)\n .cov()\n .unstack()\n .dropna()\n )\n rolling_beta = covs[\"Portfolio\"][\"Benchmark\"] / covs[\"Benchmark\"][\"Benchmark\"]\n rolling_beta.plot(ax=ax[3])\n ax[3].set_title(\"Rolling Beta to Benchmark\")\n\n c_returns = (1 + portfolio.returns).cumprod()\n bench_c_rets = (1 + portfolio.benchmark_returns).cumprod()\n\n ax[0].plot(c_returns.index, c_returns)\n ax[0].plot(bench_c_rets.index, bench_c_rets)\n ax[0].set_title(\"Cumulative Returns\")\n\n if external_axes is None:\n\n for a in ax[0], ax[1], ax[2]:\n a.legend([\"Portfolio\", \"Benchmark\"], loc=\"upper left\")\n for a in ax[0], ax[1], ax[2], ax[3]:\n a.set_xlim(portfolio.returns.index[0], portfolio.returns.index[-1])\n a.set_xlabel([])\n a.grid(\"on\")\n theme.style_primary_axis(a)\n\n ax[3].set_xlabel(\"Date\")\n\n theme.visualize_output()\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"rolling\",\n )\n\n\n@log_start_end(log=logger)\ndef display_drawdown(\n holdings: pd.DataFrame,\n export: str = \"\",\n external_axes: Optional[List[plt.Axes]] = None,\n):\n \"\"\"Display drawdown curve\n\n Parameters\n ----------\n holdings: pd.DataFrame\n Dataframe of holdings vs time\n export: str\n Format to export data\n external_axes: plt.Axes\n Optional axes to display plot on\n \"\"\"\n drawdown = portfolio_model.calculate_drawdown(holdings)\n if external_axes is None:\n _, ax = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=PLOT_DPI, sharex=True)\n else:\n ax = external_axes\n\n ax[0].plot(holdings.index, holdings)\n ax[0].set_title(\"Holdings\")\n ax[1].plot(holdings.index, drawdown)\n ax[1].fill_between(holdings.index, np.asarray(drawdown), alpha=0.4)\n ax[1].set_title(\"Portfolio Drawdown\")\n\n theme.style_primary_axis(ax[1])\n if external_axes is None:\n theme.visualize_output()\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"dd\",\n )\n\n\n#\n# @log_start_end(log=logger)\n# def plot_overall_return(\n# comb: pd.DataFrame, m_tick: str, plot: bool = False\n# ) -> 
ImageReader:\n# \"\"\"Generates overall return graph\n#\n# Parameters\n# ----------\n# comb : pd.DataFrame\n# Dataframe with returns\n# m_tick : str\n# The ticker for the market asset\n# plot : bool\n# Whether to plot the graph or return it for PDF\n#\n# Returns\n# ----------\n# img : ImageReader\n# Overal return graph\n# \"\"\"\n# fig, ax = plt.subplots(figsize=(10, 5))\n# ax.plot(comb.index, comb[\"return\"], color=\"tab:blue\", label=\"Portfolio\")\n# ax.plot(comb.index, comb[(\"Market\", \"Return\")], color=\"orange\", label=m_tick)\n#\n# ax.set_ylabel(\"\", fontweight=\"bold\", fontsize=12, color=\"black\")\n# ax.set_xlabel(\"\")\n# ax.yaxis.set_label_coords(-0.1, 0.5)\n# ax.grid(True)\n# ax.spines[\"top\"].set_visible(False)\n# ax.spines[\"right\"].set_visible(False)\n# ax.spines[\"bottom\"].set_visible(False)\n# ax.spines[\"left\"].set_visible(False)\n# fig.suptitle(\n# \"Cumulative Performance\", y=0.99, fontweight=\"bold\", fontsize=14, color=\"black\"\n# )\n# ax.axhline(0, ls=\"-\", lw=1, color=\"gray\", zorder=1)\n# ax.axhline(0, ls=\"--\", lw=1, color=\"black\", zorder=2)\n# fig.set_facecolor(\"white\")\n# ax.set_title(\n# f'{comb.index[:1][0].strftime(\"%Y/%m/%d\")} - {comb.index[-1:][0].strftime(\"%Y/%m/%d\")}',\n# fontsize=12,\n# color=\"gray\",\n# )\n# ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))\n# ax.set_facecolor(\"white\")\n# ax.legend()\n# fig.autofmt_xdate()\n# if plot:\n# plt.show()\n# console.print(\"\")\n# return None\n# imgdata = BytesIO()\n# fig.savefig(imgdata, format=\"png\")\n# plt.close(\"all\")\n# imgdata.seek(0)\n# return ImageReader(imgdata)\n#\n#\n# @log_start_end(log=logger)\n# def plot_rolling_beta(df: pd.DataFrame) -> ImageReader:\n# \"\"\"Returns a chart with the portfolio's rolling beta\n#\n# Parameters\n# ----------\n# df : pd.DataFrame\n# The dataframe to be analyzed\n#\n# Returns\n# ----------\n# img : ImageReader\n# Rolling beta graph\n# \"\"\"\n#\n# fig, ax = plt.subplots(figsize=(10, 5))\n# ax.plot(\n# df.index,\n# df[\"total\"],\n# color=\"tab:blue\",\n# )\n#\n# ax.set_ylabel(\"\", fontweight=\"bold\", fontsize=12, color=\"black\")\n# ax.set_xlabel(\"\")\n# ax.yaxis.set_label_coords(-0.1, 0.5)\n# ax.grid(True)\n# ax.spines[\"top\"].set_visible(False)\n# ax.spines[\"right\"].set_visible(False)\n# ax.spines[\"bottom\"].set_visible(False)\n# ax.spines[\"left\"].set_visible(False)\n# fig.suptitle(\n# \"Rolling Beta of Stocks\", y=0.99, fontweight=\"bold\", fontsize=14, color=\"black\"\n# )\n# ax.axhline(0, ls=\"-\", lw=1, color=\"gray\", zorder=1)\n# ax.axhline(0, ls=\"--\", lw=1, color=\"black\", zorder=2)\n# fig.set_facecolor(\"white\")\n# ax.set_title(\n# f'{df.index[:1][0].strftime(\"%Y-%m-%d\")} - {df.index[-1:][0].strftime(\"%Y-%m-%d\")}',\n# color=\"gray\",\n# )\n# ax.set_facecolor(\"white\")\n# fig.autofmt_xdate()\n# imgdata = BytesIO()\n# fig.savefig(imgdata, format=\"png\")\n# plt.close(\"all\")\n# imgdata.seek(0)\n# return ImageReader(imgdata)\n#\n#\n# @log_start_end(log=logger)\n# def plot_ef(\n# stocks: List[str],\n# variance: float,\n# per_ret: float,\n# rf_rate: float,\n# period: str = \"3mo\",\n# n_portfolios: int = 300,\n# risk_free: bool = False,\n# ):\n# \"\"\"Display efficient frontier\n#\n# Parameters\n# ----------\n# stocks : List[str]\n# List of the stocks to be included in the weights\n# variance : float\n# The variance for the portfolio\n# per_ret : float\n# The portfolio's return for the portfolio\n# rf_rate : float\n# The risk free rate\n# period : str\n# The period to track\n# n_portfolios : int\n# The 
number of portfolios to generate\n# risk_free : bool\n# Include the risk-free asset\n# \"\"\"\n# fig, ax = plt.subplots(figsize=(10, 5), dpi=PLOT_DPI)\n# ef, rets, stds = optimizer_model.generate_random_portfolios(\n# [x.upper() for x in stocks], period, n_portfolios\n# )\n# sharpes = rets / stds\n# ax.scatter(stds, rets, marker=\".\", c=sharpes, cmap=\"viridis_r\")\n# plotting.plot_efficient_frontier(ef, ax=ax, show_assets=True)\n# # Find the tangency portfolio\n# ret_sharpe, std_sharpe, _ = ef.portfolio_performance(risk_free_rate=rf_rate)\n# ax.scatter(std_sharpe, ret_sharpe, marker=\"*\", s=100, c=\"r\", label=\"Max Sharpe\")\n# plt.plot(variance, per_ret, \"ro\", label=\"Portfolio\")\n# # Add risk free line\n# if risk_free:\n# y = ret_sharpe * 1.2\n# m = (ret_sharpe - rf_rate) / std_sharpe\n# x2 = (y - rf_rate) / m\n# x = [0, x2]\n# y = [rf_rate, y]\n# line = Line2D(x, y, color=\"#FF0000\", label=\"Capital Allocation Line\")\n# ax.set_xlim(xmin=min(stds) * 0.8)\n# ax.add_line(line)\n# ax.set_title(f\"Efficient Frontier simulating {n_portfolios} portfolios\")\n# ax.legend()\n# fig.tight_layout()\n# ax.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n#\n# if gtff.USE_ION:\n# plt.ion()\n#\n# imgdata = BytesIO()\n# fig.savefig(imgdata, format=\"png\")\n# plt.close(\"all\")\n# imgdata.seek(0)\n# return ImageReader(imgdata)\n\n\n# @log_start_end(log=logger)\n# def display_allocation2(data: pd.DataFrame, graph: bool):\n# \"\"\"Displays allocation\n# Parameters\n# ----------\n# data: pd.DataFrame\n# The portfolio allocation dataframe\n# graph: bool\n# If pie chart shall be displayed with table\"\"\"\n#\n# print_rich_table(data, headers=list(data.columns), title=\"Allocation\")\n# console.print(\"\")\n#\n# if graph:\n# graph_data = data[data[\"pct_allocation\"] >= 5].copy()\n# if not graph_data.empty:\n# graph_data.loc[\"Other\"] = [\n# \"NA\",\n# data[\"value\"].sum() - graph_data[\"value\"].sum(),\n# 100 - graph_data[\"value\"].sum(),\n# ]\n# labels = graph_data.index.values\n# sizes = graph_data[\"value\"].to_list()\n# else:\n# labels = data.index.values\n# sizes = data[\"value\"].to_list()\n# fig, ax = plt.subplots()\n# ax.pie(sizes, labels=labels, autopct=\"%1.1f%%\", startangle=90)\n# ax.axis(\"equal\")\n# ax.set_title(\"Portfolio Allocation\")\n# fig.set_tight_layout(True)\n#\n# plt.show()\n\n#\n# class Report:\n# @log_start_end(log=logger)\n# def __init__(self, df: pd.DataFrame, hist: pd.DataFrame, m_tick: str):\n# \"\"\"Generate financial reports.\n# Financial reports allow users to show the how they have been performing in\n# trades. 
This allows for a simple way to show progress and analyze metrics\n# that track portfolio performance\n#\n# Parameters\n# ----------\n# df : pd.DataFrame\n# The dataframe with previous holdings information\n# hist : pd.DataFrame\n# The dataframe with previous prices for stocks in the portfolio\n# df_m : pd.DataFrame\n# Dataframe of benchmark\n# n : int\n# The number of days to analyze\n#\n# Attributes\n# ----------\n# generate_report : None\n# Generates a report with the given parameters\n# generate_pg1 : None\n# Creates the first page of the PDF report\n# generate_pg2 : None\n# Creates the second page of the PDF report\n#\n# \"\"\"\n# self.df = df\n# self.hist = hist\n# self.m_tick = m_tick\n# self.df_m = yfinance_model.get_market(self.df.index[0], self.m_tick)\n# # self.returns, self.variance = portfolio_model.get_return(df, self.df_m, n)\n# self.returns = pd.DataFrame()\n# self.rf = get_rf()\n# self.betas = portfolio_model.get_rolling_beta(\n# self.df, self.hist, self.df_m, 365\n# )\n#\n# @log_start_end(log=logger)\n# def generate_report(self) -> None:\n# d = path.dirname(path.abspath(__file__)).replace(\n# \"gamestonk_terminal\", \"exports\"\n# )\n# loc = path.abspath(\n# path.join(\n# d,\n# f\"ar_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf\",\n# )\n# )\n# report = canvas.Canvas(loc, pagesize=letter)\n# reportlab_helpers.base_format(report, \"Overview\")\n# self.generate_pg1(report)\n# self.generate_pg2(report)\n# report.save()\n# console.print(\"File save in:\\n\", loc, \"\\n\")\n#\n# @log_start_end(log=logger)\n# def generate_pg1(self, report: canvas.Canvas) -> None:\n# report.drawImage(\n# plot_overall_return(self.returns, self.m_tick, False), 15, 400, 600, 300\n# )\n# main_text = portfolio_model.get_main_text(self.returns)\n# reportlab_helpers.draw_paragraph(report, main_text, 30, 410, 550, 200)\n# current_return = self.returns[\"return\"][-1]\n# beta = self.betas[\"total\"][-1]\n# market_return = self.returns[(\"Market\", \"Return\")][-1]\n# sharpe = f\"{(current_return - self.rf)/ np.std(self.returns['return']):.2f}\"\n# treynor = f\"{(current_return - self.rf)/ beta:.2f}\" if beta > 0 else \"N/A\"\n# alpha = f\"{current_return - (self.rf + beta * (market_return - self.rf)):.2f}\"\n# information = (\n# f\"{float(alpha)/ (np.std(self.returns['return'] - market_return)):.2f}\"\n# )\n# perf = [\n# [\"Sharpe\", sharpe],\n# [\"Treynor\", treynor],\n# [\"Alpha\", alpha],\n# [\"Information\", information],\n# ]\n# reportlab_helpers.draw_table(report, \"Performance\", 540, 300, 30, perf)\n# reportlab_helpers.draw_paragraph(\n# report, portfolio_model.performance_text, 140, 290, 460, 200\n# )\n# report.showPage()\n#\n# @log_start_end(log=logger)\n# def generate_pg2(self, report: canvas.Canvas) -> None:\n# reportlab_helpers.base_format(report, \"Portfolio Analysis\")\n# if \"Holding\" in self.df.columns:\n# report.drawImage(plot_rolling_beta(self.betas), 15, 400, 600, 300)\n# main_t = portfolio_model.get_beta_text(self.betas)\n# reportlab_helpers.draw_paragraph(report, main_t, 30, 410, 550, 200)\n# # report.drawImage(plot_ef(uniques, self.variance, self.returns[\"return\"][-1], self.rf), 15, 65, 600, 300)\n", "\"\"\" opensea.io Model \"\"\"\n\nimport logging\nfrom datetime import datetime\n\nimport pandas as pd\nimport requests\n\nfrom gamestonk_terminal.decorators import log_start_end\n\nlogger = logging.getLogger(__name__)\n\nAPI_URL = \"https://api.opensea.io/api/v1\"\n\n\n@log_start_end(log=logger)\ndef get_collection_stats(slug: str) -> pd.DataFrame:\n \"\"\"Get stats of a nft 
collection [Source: opensea.io]\n\n Parameters\n -------\n slug : str\n Opensea collection slug. If the name of the collection is Mutant Ape Yacht Club the slug is mutant-ape-yacht-club\n\n Returns\n -------\n pd.DataFrame\n collection stats\n \"\"\"\n res = requests.get(f\"{API_URL}/collection/{slug}\")\n if res.status_code == 200:\n data = res.json()\n collection = data[\"collection\"]\n stats = collection[\"stats\"]\n metrics = [\n \"Name\",\n \"Floor Price (ETH)\",\n \"Number of Owners\",\n \"Market Cap (ETH)\",\n \"Average Price ETH\",\n \"One day volume (ETH)\",\n \"One day change (%)\",\n \"One day sales (ETH)\",\n \"One day average price (ETH)\",\n \"Thirty day volume (ETH)\",\n \"Thirty day change (%)\",\n \"Thirty day sales (ETH)\",\n \"Thirty day average price (ETH)\",\n \"Total Supply (ETH)\",\n \"Total Sales (ETH)\",\n \"Total Volume (ETH)\",\n \"Creation Date\",\n \"URL\",\n ]\n values = [\n collection[\"name\"],\n \"-\" if not stats[\"floor_price\"] else float(stats[\"floor_price\"]),\n round(float(stats[\"num_owners\"]), 2),\n round(float(stats[\"market_cap\"]), 2),\n round(float(stats[\"average_price\"]), 2),\n round(float(stats[\"one_day_volume\"]), 2),\n round(float(stats[\"one_day_change\"]) * 100, 2),\n round(float(stats[\"one_day_sales\"]), 2),\n round(float(stats[\"one_day_average_price\"]), 2),\n round(float(stats[\"thirty_day_volume\"]), 2),\n round(float(stats[\"thirty_day_change\"]) * 100, 2),\n round(float(stats[\"thirty_day_sales\"]), 2),\n round(float(stats[\"thirty_day_average_price\"]), 2),\n round(float(stats[\"total_supply\"]), 2),\n round(float(stats[\"total_sales\"]), 2),\n round(float(stats[\"total_volume\"]), 2),\n datetime.strptime(\n collection[\"created_date\"], \"%Y-%m-%dT%H:%M:%S.%f\"\n ).strftime(\"%b %d, %Y\"),\n \"-\" if not collection[\"external_url\"] else collection[\"external_url\"],\n ]\n df = pd.DataFrame({\"Metric\": metrics, \"Value\": values})\n return df\n return pd.DataFrame()\n", "\"\"\"Twitter Model\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nfrom typing import Optional\n\nimport pandas as pd\nimport requests\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\nfrom gamestonk_terminal import config_terminal as cfg\nfrom gamestonk_terminal.decorators import log_start_end\nfrom gamestonk_terminal.helper_funcs import clean_tweet, get_data\nfrom gamestonk_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\nanalyzer = SentimentIntensityAnalyzer()\n\n\n@log_start_end(log=logger)\ndef load_analyze_tweets(\n ticker: str,\n count: int,\n start_time: Optional[str] = \"\",\n end_time: Optional[str] = \"\",\n) -> pd.DataFrame:\n \"\"\"Load tweets from twitter API and analyzes using VADER\n\n Parameters\n ----------\n ticker: str\n Ticker to search twitter for\n count: int\n Number of tweets to analyze\n start : Optional[str]\n If given, the start time to get tweets from\n end : Optional[str]\n If given, the end time to get tweets from\n\n Returns\n -------\n df_tweet: pd.DataFrame\n Dataframe of tweets and sentiment\n \"\"\"\n params = {\n \"query\": rf\"(\\${ticker}) (lang:en)\",\n \"max_results\": str(count),\n \"tweet.fields\": \"created_at,lang\",\n }\n\n if start_time:\n # Assign from and to datetime parameters for the API\n params[\"start_time\"] = start_time\n if end_time:\n params[\"end_time\"] = end_time\n\n # Request Twitter API\n response = requests.get(\n \"https://api.twitter.com/2/tweets/search/recent\",\n params=params, # type: ignore\n headers={\"authorization\": \"Bearer 
\" + cfg.API_TWITTER_BEARER_TOKEN},\n )\n\n # Create dataframe\n df_tweets = pd.DataFrame()\n\n # Check that the API response was successful\n if response.status_code == 200:\n for tweet in response.json()[\"data\"]:\n row = get_data(tweet)\n df_tweets = df_tweets.append(row, ignore_index=True)\n elif response.status_code == 401:\n console.print(\"Twitter API Key provided is incorrect\\n\")\n return pd.DataFrame()\n elif response.status_code == 400:\n console.print(\n \"Status Code 400. This means you are requesting data from beyond the API's 7 day limit\"\n )\n return pd.DataFrame()\n\n sentiments = []\n pos = []\n neg = []\n neu = []\n\n for s_tweet in df_tweets[\"text\"].to_list():\n tweet = clean_tweet(s_tweet, ticker)\n sentiments.append(analyzer.polarity_scores(tweet)[\"compound\"])\n pos.append(analyzer.polarity_scores(tweet)[\"pos\"])\n neg.append(analyzer.polarity_scores(tweet)[\"neg\"])\n neu.append(analyzer.polarity_scores(tweet)[\"neu\"])\n # Add sentiments to tweets dataframe\n df_tweets[\"sentiment\"] = sentiments\n df_tweets[\"positive\"] = pos\n df_tweets[\"negative\"] = neg\n df_tweets[\"neutral\"] = neu\n\n return df_tweets\n", "\"\"\"Blockchain Center Model\"\"\"\nimport logging\nfrom typing import List, Union\n\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests.adapters import HTTPAdapter, RetryError\nfrom urllib3.util.retry import Retry\n\nfrom gamestonk_terminal.decorators import log_start_end\nfrom gamestonk_terminal.helper_funcs import get_user_agent\nfrom gamestonk_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\nHACKS_COLUMNS = [\"Platform\", \"Date\", \"Amount [$]\", \"Audit\", \"Slug\", \"URL\"]\n\n\n@log_start_end(log=logger)\ndef _retry_session(\n url: str, retries: int = 3, backoff_factor: float = 1.0\n) -> requests.Session:\n \"\"\"Helper methods that retries to make request\n\n\n Parameters\n ----------\n url: str\n Url to mount a session\n retries: int\n How many retries\n backoff_factor: float\n Backoff schema - time periods between retry\n\n Returns\n -------\n requests.Session\n Mounted session\n \"\"\"\n\n session = requests.Session()\n retry = Retry(\n total=retries,\n read=retries,\n connect=retries,\n status_forcelist=[500, 502, 503, 504],\n backoff_factor=backoff_factor,\n )\n adapter = HTTPAdapter(max_retries=retry)\n session.mount(url, adapter)\n return session\n\n\n@log_start_end(log=logger)\ndef _make_request(url: str) -> Union[BeautifulSoup, None]:\n \"\"\"Helper method to scrap\n\n Parameters\n ----------\n url : str\n url to scrape\n\n Returns\n -------\n BeautifulSoup object\n \"\"\"\n headers = {\"User-Agent\": get_user_agent()}\n session = _retry_session(\"https://www.coingecko.com\")\n try:\n req = session.get(url, headers=headers, timeout=5)\n except Exception as error:\n console.print(error)\n raise RetryError(\n \"Connection error. Couldn't connect to CoinGecko and scrape the data. \"\n \"Please visit CoinGecko site, and check if it's not under maintenance\"\n ) from error\n\n if req.status_code == 404:\n return None\n\n if req.status_code >= 400:\n raise Exception(\n f\"Couldn't connect to {url}. Status code: {req.status_code}. 
Reason: {req.reason}\"\n )\n\n return BeautifulSoup(req.text, features=\"lxml\")\n\n\n@log_start_end(log=logger)\ndef get_crypto_hacks() -> pd.DataFrame:\n \"\"\"Get major crypto-related hacks\n [Source: https://rekt.news]\n\n Parameters\n ----------\n\n Returns\n -------\n pandas.DataFrame:\n Hacks with columns {Platform,Date,Amount [$],Audited,Slug,URL}\n \"\"\"\n soup = _make_request(\"https://rekt.news/leaderboard\")\n if soup:\n rekt_list = soup.find(\"ol\", {\"class\": \"leaderboard-content\"}).find_all(\"li\")\n df = pd.DataFrame(columns=HACKS_COLUMNS)\n for item in rekt_list:\n a = item.find(\"a\", href=True)\n audit = item.find(\"span\", {\"class\": \"leaderboard-audit\"}).text\n details = item.find(\"div\", {\"class\": \"leaderboard-row-details\"}).text.split(\n \"|\"\n )\n url = a[\"href\"]\n title = a.text\n amount = int(details[0][1:].replace(\",\", \"\"))\n date = details[1].replace(\" \", \"\")\n df.loc[len(df.index)] = [\n title,\n date,\n amount,\n audit,\n url.replace(\"/\", \"\"),\n f\"https://rekt.news{url}\",\n ]\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n return df\n return pd.DataFrame()\n\n\n@log_start_end(log=logger)\ndef get_crypto_hack(slug: str) -> Union[str, None]:\n \"\"\"Get crypto hack\n [Source: https://rekt.news]\n\n Parameters\n ----------\n slug: str\n slug of crypto hack\n\n Returns\n -------\n pandas.DataFrame:\n Hacks with columns {Platform,Date,Amount [$],Audited,URL}\n \"\"\"\n url = f\"https://rekt.news/{slug}\"\n soup = _make_request(url)\n if not soup:\n console.print(f'Slug \"{slug}\" not found\\n')\n return None\n title = soup.find(\"h1\", {\"class\": \"post-title\"}).text\n date = soup.find(\"time\").text\n content = (\n soup.find(\"section\", {\"class\": \"post-content\"})\n .get_text(\"\\n\")\n .replace(\"\\r\\n,\", \", \")\n .replace(\"\\n,\", \", \")\n .replace(\"\\r\\n.\", \".\\n\\t\")\n .replace(\"\\n.\", \".\\n\\t\")\n .replace(\"\\r\\n \", \" \")\n .replace(\"\\n \", \" \")\n ).split(\"\"\"SUBSCRIBE\"\"\")[0]\n final_str = f\"\"\"\n {title}\n {date}\n\n {content}\n\n Detailed history in {url}\n \"\"\"\n return final_str\n\n\n@log_start_end(log=logger)\ndef get_crypto_hack_slugs() -> List[str]:\n \"\"\"Get all crypto hack slugs\n [Source: https://rekt.news]\n Returns\n -------\n List[str]:\n List with slugs\n \"\"\"\n soup = _make_request(\"https://rekt.news/leaderboard\")\n href_list = []\n if soup:\n rekt_list = soup.find(\"ol\", {\"class\": \"leaderboard-content\"}).find_all(\"li\")\n for item in rekt_list:\n a = item.find(\"a\", href=True)[\"href\"].replace(\"/\", \"\")\n href_list.append(a)\n return href_list\n return href_list\n", "\"\"\"Terra Money FCD model\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nimport textwrap\nfrom datetime import datetime\nfrom typing import Any, Tuple, Dict\n\nimport pandas as pd\nimport requests\n\nfrom gamestonk_terminal.cryptocurrency.dataframe_helpers import (\n denominate_number,\n prettify_column_names,\n replace_unicode,\n)\nfrom gamestonk_terminal.decorators import log_start_end\n\nlogger = logging.getLogger(__name__)\n\nGOV_COLUMNS = [\n \"submitTime\",\n \"id\",\n \"depositEndTime\",\n \"status\",\n \"type\",\n \"title\",\n \"Yes\",\n \"No\",\n]\nGOV_STATUSES = [\"voting\", \"deposit\", \"passed\", \"rejected\", \"all\"]\nVALIDATORS_COLUMNS = [\n \"validatorName\",\n \"tokensAmount\",\n \"votingPower\",\n \"commissionRate\",\n \"status\",\n \"uptime\",\n]\n\n\n@log_start_end(log=logger)\ndef _make_request(endpoint: str) -> dict:\n \"\"\"Helper method handles terra fcd api requests. 
[Source: https://fcd.terra.dev/v1]\n\n Parameters\n ----------\n endpoint: str\n endpoint url\n Returns\n -------\n dict:\n dictionary with response data\n \"\"\"\n\n url = f\"https://fcd.terra.dev/v1/{endpoint}\"\n response = requests.get(\n url, headers={\"Accept\": \"application/json\", \"User-Agent\": \"GST\"}\n )\n if not 200 <= response.status_code < 300:\n raise Exception(f\"fcd terra api exception: {response.text}\")\n try:\n return response.json()\n except Exception as e:\n raise ValueError(f\"Invalid Response: {response.text}\") from e\n\n\n@log_start_end(log=logger)\ndef _adjust_delegation_info(delegation: dict) -> dict:\n \"\"\"Helper method which removes redundant fields from delegation info dictionary,\n and denominate value fields. [Source: https://fcd.terra.dev/v1]\n\n Parameters\n ----------\n delegation:\n dictionary object with delegation data e.g.\n\n Returns\n -------\n dict\n adjusted dictionary with delegation data\n \"\"\"\n\n delegation_info = {}\n for key, value in delegation.items():\n if key in [\"amountDelegated\", \"totalReward\"]:\n delegation_info[key] = denominate_number(value)\n elif key in [\"validatorAddress\", \"rewards\"]:\n continue\n else:\n delegation_info[key] = value\n return delegation_info\n\n\n@log_start_end(log=logger)\ndef get_staking_account_info(address: str = \"\") -> Tuple[pd.DataFrame, str]:\n \"\"\"Get staking info for provided terra account [Source: https://fcd.terra.dev/swagger]\n\n Parameters\n ----------\n address: str\n terra blockchain address e.g. terra1jvwelvs7rdk6j3mqdztq5tya99w8lxk6l9hcqg\n Returns\n -------\n Tuple[pd.DataFrame, str]:\n luna delegations and summary report for given address\n \"\"\"\n\n response = _make_request(f\"staking/{address}\")\n results: Dict[str, Any] = {\"myDelegations\": []}\n\n for field in [\"availableLuna\", \"delegationTotal\"]:\n results[field] = denominate_number(response.get(field, 0))\n\n my_delegations = response.get(\"myDelegations\")\n if my_delegations:\n for delegation in my_delegations:\n validator = _adjust_delegation_info(delegation)\n results[\"myDelegations\"].append(validator)\n\n df = pd.DataFrame(results[\"myDelegations\"])\n\n try:\n df[\"validatorName\"] = df[\"validatorName\"].apply(lambda x: replace_unicode(x))\n df.columns = prettify_column_names(list(df.columns))\n except KeyError:\n df = pd.DataFrame()\n\n results[\"totalRewards\"] = denominate_number(\n response.get(\"rewards\", {}).get(\"total\", 0)\n )\n\n report = f\"\"\"Overview:\n Address: {address}\n Available Luna: {results['availableLuna']}\n Delegated Luna: {results['delegationTotal']}\n Total Rewards: {results['totalRewards']}\\n\"\"\"\n report += \"\\nDelegations: \" if not df.empty else \"\\nNo delegations found\\n\"\n\n return df, report\n\n\n@log_start_end(log=logger)\ndef get_validators() -> pd.DataFrame:\n \"\"\"Get information about terra validators [Source: https://fcd.terra.dev/swagger]\n\n Returns\n -------\n pd.DataFrame\n terra validators details\n \"\"\"\n\n response = _make_request(\"staking\")[\"validators\"]\n results = []\n for validator in response:\n results.append(\n {\n \"accountAddress\": validator[\"accountAddress\"],\n \"validatorName\": validator[\"description\"].get(\"moniker\"),\n \"tokensAmount\": denominate_number(validator[\"tokens\"]),\n \"votingPower\": round(\n (float(validator[\"votingPower\"].get(\"weight\")) * 100), 2\n ),\n \"commissionRate\": round(\n (float(validator[\"commissionInfo\"].get(\"rate\", 0)) * 100), 2\n ),\n \"status\": validator[\"status\"],\n \"uptime\": 
round((float(validator.get(\"upTime\", 0)) * 100), 2),\n }\n )\n\n return pd.DataFrame(results).sort_values(by=\"votingPower\")\n\n\n@log_start_end(log=logger)\ndef get_proposals(status: str = \"\") -> pd.DataFrame:\n \"\"\"Get terra blockchain governance proposals list [Source: https://fcd.terra.dev/swagger]\n\n Parameters\n ----------\n status: str\n status of proposal, one from list: ['Voting','Deposit','Passed','Rejected']\n\n Returns\n -------\n pd.DataFrame\n Terra blockchain governance proposals list\n \"\"\"\n\n statuses = [\"Voting\", \"Deposit\", \"Passed\", \"Rejected\"]\n response = _make_request(\"gov/proposals\")[\"proposals\"]\n results = []\n votes_options = [\"Yes\", \"Abstain\", \"No\", \"NoWithVeto\"]\n for proposal in response:\n deposit = proposal.pop(\"deposit\")\n proposal[\"depositEndTime\"] = deposit.get(\"depositEndTime\")\n vote = proposal.pop(\"vote\")\n proposal.pop(\"proposer\")\n for opt in votes_options:\n proposal[opt] = vote[\"count\"].get(opt)\n\n results.append(proposal)\n columns = [\n \"id\",\n \"submitTime\",\n \"depositEndTime\",\n \"status\",\n \"type\",\n \"title\",\n \"Yes\",\n \"No\",\n \"Abstain\",\n \"NoWithVeto\",\n ]\n df = pd.DataFrame(results)[columns]\n df[[\"id\", \"Yes\", \"No\", \"Abstain\", \"NoWithVeto\"]] = df[\n [\"id\", \"Yes\", \"No\", \"Abstain\", \"NoWithVeto\"]\n ].astype(int, errors=\"ignore\")\n df[\"title\"] = df[\"title\"].apply(\n lambda x: \"\\n\".join(textwrap.wrap(x, width=40)) if isinstance(x, str) else x\n )\n\n for col in [\"submitTime\", \"depositEndTime\"]:\n df[col] = df[col].apply(lambda x: pd.to_datetime(x).strftime(\"%Y-%m-%d %H:%M\"))\n\n if status.title() in statuses:\n df = df[df[\"status\"] == status.title()]\n return df\n\n\n@log_start_end(log=logger)\ndef get_account_growth(cumulative: bool = True) -> pd.DataFrame:\n \"\"\"Get terra blockchain account growth history [Source: https://fcd.terra.dev/swagger]\n\n Parameters\n ----------\n cumulative: bool\n distinguish between periodical and cumulative account growth data\n Returns\n -------\n pd.DataFrame\n historical data of accounts growth\n \"\"\"\n\n response = _make_request(\"dashboard/account_growth\")\n kind = \"cumulative\" if cumulative else \"periodic\"\n df = pd.DataFrame(response[kind])\n df[\"date\"] = df[\"datetime\"].apply(lambda x: datetime.fromtimestamp(x / 1000).date())\n df = df[[\"date\", \"totalAccountCount\", \"activeAccountCount\"]]\n df.columns = [\"date\", \"Total accounts\", \"Active accounts\"]\n return df\n\n\n@log_start_end(log=logger)\ndef get_staking_ratio_history():\n \"\"\"Get terra blockchain staking ratio history [Source: https://fcd.terra.dev/swagger]\n\n Returns\n -------\n pd.DataFrame\n historical staking ratio\n \"\"\"\n\n response = _make_request(\"dashboard/staking_ratio\")\n df = pd.DataFrame(response)\n df[\"date\"] = df[\"datetime\"].apply(lambda x: datetime.fromtimestamp(x / 1000).date())\n df[\"stakingRatio\"] = df[\"stakingRatio\"].apply(lambda x: round(float(x) * 100, 2))\n return df[[\"date\", \"stakingRatio\"]]\n\n\n@log_start_end(log=logger)\ndef get_staking_returns_history():\n \"\"\"Get terra blockchain staking returns history [Source: https://fcd.terra.dev/v1]\n\n Returns\n -------\n pd.DataFrame\n historical staking returns\n \"\"\"\n\n response = _make_request(\"dashboard/staking_return\")\n df = pd.DataFrame(response)\n df[\"date\"] = df[\"datetime\"].apply(lambda x: datetime.fromtimestamp(x / 1000).date())\n df[\"annualizedReturn\"] = df[\"annualizedReturn\"].apply(\n lambda x: round(float(x) * 100, 
2)\n )\n return df[[\"date\", \"annualizedReturn\"]]\n", "\"\"\"AlphaVantage Forex View.\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nfrom typing import Optional, List\n\nimport matplotlib.pyplot as plt\nimport mplfinance as mpf\nimport pandas as pd\n\nfrom gamestonk_terminal.config_terminal import theme\nfrom gamestonk_terminal.decorators import log_start_end\nfrom gamestonk_terminal.forex import av_model\nfrom gamestonk_terminal.helper_funcs import plot_autoscale, print_rich_table\nfrom gamestonk_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef display_quote(to_symbol: str, from_symbol: str):\n \"\"\"Display current forex pair exchange rate.\n\n Parameters\n ----------\n to_symbol : str\n To symbol\n from_symbol : str\n From forex symbol\n \"\"\"\n quote = av_model.get_quote(to_symbol, from_symbol)\n\n if not quote:\n console.print(\"[red]Quote not pulled from AlphaVantage. Check API key.[/red]\")\n return\n\n df = pd.DataFrame.from_dict(quote)\n df.index = df.index.to_series().apply(lambda x: x[3:]).values\n df = df.iloc[[0, 2, 5, 4, 7, 8]]\n print_rich_table(\n df,\n show_index=True,\n title=f\"[bold]{from_symbol}/{to_symbol} Quote [/bold]\",\n )\n console.print(\"\")\n\n\n@log_start_end(log=logger)\ndef display_candle(\n data: pd.DataFrame,\n to_symbol: str,\n from_symbol: str,\n external_axes: Optional[List[plt.Axes]] = None,\n):\n \"\"\"Show candle plot for fx data.\n\n Parameters\n ----------\n data : pd.DataFrame\n Loaded fx historical data\n to_symbol : str\n To forex symbol\n from_symbol : str\n From forex symbol\n external_axes: Optional[List[plt.Axes]]\n External axes (1 axis are expected in the list), by default None\n \"\"\"\n candle_chart_kwargs = {\n \"type\": \"candle\",\n \"style\": theme.mpf_style,\n \"mav\": (20, 50),\n \"volume\": False,\n \"xrotation\": theme.xticks_rotation,\n \"scale_padding\": {\"left\": 0.3, \"right\": 1, \"top\": 0.8, \"bottom\": 0.8},\n \"update_width_config\": {\n \"candle_linewidth\": 0.6,\n \"candle_width\": 0.8,\n \"volume_linewidth\": 0.8,\n \"volume_width\": 0.8,\n },\n \"warn_too_much_data\": 10000,\n }\n # This plot has 2 axes\n if not external_axes:\n candle_chart_kwargs[\"returnfig\"] = True\n candle_chart_kwargs[\"figratio\"] = (10, 7)\n candle_chart_kwargs[\"figscale\"] = 1.10\n candle_chart_kwargs[\"figsize\"] = plot_autoscale()\n fig, _ = mpf.plot(data, **candle_chart_kwargs)\n fig.suptitle(\n f\"{from_symbol}/{to_symbol}\",\n x=0.055,\n y=0.965,\n horizontalalignment=\"left\",\n )\n theme.visualize_output(force_tight_layout=False)\n else:\n if len(external_axes) != 1:\n console.print(\"[red]Expected list of 1 axis items./n[/red]\")\n return\n (ax1,) = external_axes\n candle_chart_kwargs[\"ax\"] = ax1\n mpf.plot(data, **candle_chart_kwargs)\n", "import logging\nimport os\n\nfrom matplotlib import pyplot\nfrom prompt_toolkit import PromptSession\nfrom prompt_toolkit.eventloop.inputhook import set_eventloop_with_inputhook\nfrom prompt_toolkit.history import FileHistory\nfrom gamestonk_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\n\ndef inputhook(inputhook_context):\n while not inputhook_context.input_is_ready():\n try:\n pyplot.pause(0.1)\n except Exception as exp:\n logger.exception(\"%s\", type(exp).__name__)\n continue\n return False\n\n\nhistory_file = os.path.join(os.path.expanduser(\"~\"), \".gamestonk_terminal.his\")\n\ntry:\n session = PromptSession(history=FileHistory(history_file)) # type: ignore\n 
set_eventloop_with_inputhook(inputhook)\n# pylint: disable=unused-variable\nexcept Exception as e: # noqa: F841\n logger.exception(\"%s\", type(e).__name__)\n console.print(\n \"WARNING: Prompt toolkit is turned on but did not initialize successfully. Falling back to input()...\"\n )\n session = None # type: ignore\n", "# IMPORTATION STANDARD\nimport pandas as pd\nimport numpy as np\n\n# IMPORTATION THIRDPARTY\nimport pytest\n\n# IMPORTATION INTERNAL\nfrom gamestonk_terminal.stocks.sector_industry_analysis import stockanalysis_model\n\n\[email protected]\[email protected](\n \"stocks, finance_key, sa_keys, period, statement\",\n [\n ([\"AAPL\"], \"re\", stockanalysis_model.sa_keys, \"annual\", \"IS\"),\n ([\"AAPL\"], \"rec\", stockanalysis_model.sa_keys, \"quarterly\", \"BS\"),\n (\n [\"FB\", \"TSLA\", \"MSFT\"],\n \"ncf\",\n stockanalysis_model.sa_keys,\n \"annual\",\n \"CF\",\n ),\n (\n [\"FB\", \"TSLA\", \"MSFT\"],\n \"ni\",\n stockanalysis_model.sa_keys,\n \"quarterly\",\n \"IS\",\n ),\n (\n [\"FB\", \"TSLA\", \"MSFT\"],\n \"tle\",\n stockanalysis_model.sa_keys,\n \"trailing\",\n \"BS\",\n ),\n ],\n)\ndef test_get_stocks_data(recorder, stocks, finance_key, sa_keys, period, statement):\n result = stockanalysis_model.get_stocks_data(\n stocks=stocks,\n finance_key=finance_key,\n sa_dict=sa_keys,\n stocks_data=dict(),\n period=period,\n )\n\n recorder.capture_list(result[statement].values())\n\n\[email protected]\ndef test_match_length_dataframes(recorder):\n result = stockanalysis_model.match_length_dataframes(\n dataframes={\n \"TSLA\": pd.DataFrame(\n np.nan, index=[\"Item 1\", \"Item 2\"], columns=[\"2010\", \"2011\", \"2012\"]\n ),\n \"AAPL\": pd.DataFrame(np.nan, index=[\"Item 1\", \"Item 2\"], columns=[\"2011\"]),\n }\n )\n\n recorder.capture_list(result.values())\n\n\[email protected]\ndef test_change_type_dataframes(recorder):\n result = stockanalysis_model.change_type_dataframes(\n dataframe=pd.DataFrame(\n [[\"1,0\", \"1.0\", \"1,0\"], [\"2,0\", \"2,000\", 2]],\n index=[\"Item 1\", \"Item 2\"],\n columns=[\"2010\", \"2011\", \"2012\"],\n )\n )\n\n recorder.capture(result)\n", "import df2img\nimport disnake\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom PIL import Image\n\nimport discordbot.config_discordbot as cfg\nfrom discordbot.config_discordbot import logger\nfrom discordbot.helpers import autocrop_image\nfrom gamestonk_terminal.helper_funcs import get_user_agent\n\n\nasync def unu_command(ctx, num: int = None):\n \"\"\"Unusual Options\"\"\"\n try:\n\n # Debug\n if cfg.DEBUG:\n logger.debug(\"!stocks.opt.unu %s\", num)\n\n # Check for argument\n if num is None:\n num = 10\n\n pages = np.arange(0, num // 20 + 1)\n data_list = []\n for page_num in pages:\n\n r = requests.get(\n f\"https://app.fdscanner.com/api2/unusualvolume?p=0&page_size=20&page={int(page_num)}\",\n headers={\"User-Agent\": get_user_agent()},\n )\n\n if r.status_code != 200:\n logger.debug(\"Error in fdscanner request\")\n return pd.DataFrame(), \"request error\"\n\n data_list.append(r.json())\n\n ticker, expiry, option_strike, option_type, ask, bid, oi, vol, voi = (\n [],\n [],\n [],\n [],\n [],\n [],\n [],\n [],\n [],\n )\n for data in data_list:\n for entry in data[\"data\"]:\n ticker.append(entry[\"tk\"])\n expiry.append(entry[\"expiry\"])\n option_strike.append(float(entry[\"s\"]))\n option_type.append(\"Put\" if entry[\"t\"] == \"P\" else \"Call\")\n ask.append(entry[\"a\"])\n bid.append(entry[\"b\"])\n oi.append(entry[\"oi\"])\n vol.append(entry[\"v\"])\n 
voi.append(entry[\"vol/oi\"])\n\n df = pd.DataFrame(\n {\n \"Ticker\": ticker,\n \"Exp\": expiry,\n \"Strike\": option_strike,\n \"Type\": option_type,\n \"Vol/OI\": voi,\n \"Vol\": vol,\n \"OI\": oi,\n }\n )\n\n df = df.replace({\"2021-\", \"2022-\"}, \"\", regex=True)\n df.set_index(\"Ticker\", inplace=True)\n dindex = len(df.index)\n fig = df2img.plot_dataframe(\n df,\n fig_size=(800, (40 + (40 * dindex))),\n col_width=[3, 3, 3, 3, 3, 3, 3],\n tbl_cells=dict(\n align=\"left\",\n height=35,\n ),\n template=\"plotly_dark\",\n font=dict(\n family=\"Consolas\",\n size=20,\n ),\n paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n )\n imagefile = \"opt-unu.png\"\n df2img.save_dataframe(fig=fig, filename=imagefile)\n image = Image.open(imagefile)\n image = autocrop_image(image, 0)\n image.save(imagefile, \"PNG\", quality=100)\n image = disnake.File(imagefile)\n title = \"Unusual Options\"\n embed = disnake.Embed(title=title, colour=cfg.COLOR)\n embed.set_image(url=f\"attachment://{imagefile}\")\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n await ctx.send(embed=embed, file=image)\n\n except Exception as e:\n embed = disnake.Embed(\n title=\"ERROR Unusual Options\",\n colour=cfg.COLOR,\n description=e,\n )\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n await ctx.send(embed=embed, delete_after=30.0)\n" ]
[ [ "pandas.DataFrame" ], [ "numpy.asarray", "pandas.concat", "matplotlib.pyplot.subplots", "pandas.DataFrame" ], [ "pandas.DataFrame" ], [ "pandas.DataFrame" ], [ "pandas.to_datetime", "pandas.DataFrame" ], [ "pandas.to_datetime", "pandas.DataFrame" ], [ "pandas.DataFrame.from_dict" ], [ "matplotlib.pyplot.pause" ], [ "pandas.DataFrame" ], [ "numpy.arange", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
matthieucoquet/probability
[ "2426f4fc4743ceedc1a638a03d19ce6654ebff76", "2426f4fc4743ceedc1a638a03d19ce6654ebff76", "2426f4fc4743ceedc1a638a03d19ce6654ebff76", "2426f4fc4743ceedc1a638a03d19ce6654ebff76", "2426f4fc4743ceedc1a638a03d19ce6654ebff76", "2426f4fc4743ceedc1a638a03d19ce6654ebff76", "2426f4fc4743ceedc1a638a03d19ce6654ebff76" ]
[ "tensorflow_probability/python/experimental/auto_batching/numpy_backend_test.py", "tensorflow_probability/python/bijectors/sigmoid_test.py", "tensorflow_probability/python/internal/backend/numpy/_utils.py", "tensorflow_probability/python/mcmc/sample_halton_sequence.py", "tensorflow_probability/python/sts/smooth_seasonal.py", "tensorflow_probability/python/experimental/auto_batching/allocation_strategy_test.py", "tensorflow_probability/python/distributions/von_mises_fisher_test.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for implementations of batched variables.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport hypothesis as hp\nfrom hypothesis import strategies as hps\nfrom hypothesis.extra import numpy as hpnp\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.experimental.auto_batching import backend_test_lib as backend_test\nfrom tensorflow_probability.python.experimental.auto_batching import instructions as inst\nfrom tensorflow_probability.python.experimental.auto_batching import numpy_backend\n\nNP_BACKEND = numpy_backend.NumpyBackend()\n\n\ndef var_init(max_stack_depth, initial_value):\n type_ = inst.TensorType(initial_value.dtype, initial_value.shape[1:])\n var = NP_BACKEND.create_variable(\n None, inst.VariableAllocation.FULL, type_,\n max_stack_depth, batch_size=initial_value.shape[0])\n return var.update(\n initial_value, NP_BACKEND.full_mask(initial_value.shape[0]))\n\n\n# A TF test case for self.assertAllEqual, but doesn't use TF so doesn't care\n# about Eager vs Graph mode.\nclass NumpyVariableTest(tf.test.TestCase, backend_test.VariableTestCase):\n\n def testNumpySmoke(self):\n \"\"\"Test the property on specific example, without relying on Hypothesis.\"\"\"\n init = (12, np.random.randn(3, 2, 2).astype(np.float32))\n ops = [('pop', [False, False, True]),\n ('push', [True, False, True]),\n ('update', np.ones((3, 2, 2), dtype=np.float32),\n [True, True, False]),\n ('pop', [True, False, True])]\n self.check_same_results(init, ops, var_init)\n\n @hp.given(hps.data())\n @hp.settings(\n deadline=None,\n max_examples=100)\n def testNumpyVariableRandomOps(self, data):\n # Hypothesis strategy:\n # Generate a random max stack depth and value shape\n # Deduce the batch size from the value shape\n # Make a random dtype\n # Generate a random initial value of that dtype and shape\n # Generate ops, some of which write random values of that dtype and shape\n max_stack_depth = data.draw(hps.integers(min_value=1, max_value=1000))\n value_shape = data.draw(hpnp.array_shapes(min_dims=1))\n batch_size = value_shape[0]\n dtype = data.draw(hpnp.scalar_dtypes())\n masks = hpnp.arrays(dtype=np.bool, shape=[batch_size])\n values = hpnp.arrays(dtype, value_shape)\n init_val = data.draw(values)\n ops = data.draw(\n hps.lists(\n hps.one_of(\n hps.tuples(hps.just('update'), values, masks),\n hps.tuples(hps.just('push'), masks),\n hps.tuples(hps.just('pop'), masks), # preserve line break\n hps.tuples(hps.just('read')))))\n self.check_same_results((max_stack_depth, init_val), ops, var_init)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Sigmoid Tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nfrom scipy import special\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import bijectors as tfb\n\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SigmoidBijectorTest(tf.test.TestCase):\n \"\"\"Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation.\"\"\"\n\n def testBijector(self):\n self.assertStartsWith(tfb.Sigmoid().name, \"sigmoid\")\n x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)\n y = special.expit(x)\n ildj = -np.log(y) - np.log1p(-y)\n bijector = tfb.Sigmoid()\n self.assertAllClose(\n y, self.evaluate(bijector.forward(x)), atol=0., rtol=1e-2)\n self.assertAllClose(\n x, self.evaluate(bijector.inverse(y)), atol=0., rtol=1e-4)\n self.assertAllClose(\n ildj,\n self.evaluate(bijector.inverse_log_det_jacobian(\n y, event_ndims=0)), atol=0., rtol=1e-6)\n self.assertAllClose(\n -ildj,\n self.evaluate(bijector.forward_log_det_jacobian(\n x, event_ndims=0)), atol=0., rtol=1e-4)\n\n def testScalarCongruency(self):\n bijector_test_util.assert_scalar_congruency(\n tfb.Sigmoid(), lower_x=-7., upper_x=7., eval_func=self.evaluate,\n rtol=.1)\n\n def testBijectiveAndFinite(self):\n x = np.linspace(-100., 100., 100).astype(np.float32)\n eps = 1e-3\n y = np.linspace(eps, 1. - eps, 100).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n tfb.Sigmoid(), x, y, eval_func=self.evaluate, event_ndims=0, atol=0.,\n rtol=1e-4)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Helper functions for numpy backend.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\nimport types\n\nimport numpy as np\nimport tensorflow as tf\n\n\n__all__ = [\n 'common_dtype',\n 'copy_docstring',\n 'numpy_dtype',\n 'try_import',\n]\n\n\n# TODO(jvdillon): Get decoration working. 
Eg,\n# # Dependency imports\n# import decorator\n\n\ndef copy_docstring(original_fn, new_fn): # pylint: disable=unused-argument\n return new_fn\n # TODO(jvdillon): Get decoration working. Eg,\n # @decorator.decorator\n # def wrap(wrapped_fn, *args, **kwargs):\n # del wrapped_fn\n # return new_fn(*args, **kwargs)\n # return wrap(original_fn)\n\n\ndef numpy_dtype(dtype):\n if dtype is None:\n return None\n if hasattr(dtype, 'as_numpy_dtype'):\n return dtype.as_numpy_dtype\n return dtype\n\n\ndef common_dtype(args_list, dtype_hint=None):\n \"\"\"Returns explict dtype from `args_list` if exists, else dtype_hint.\"\"\"\n dtype = None\n dtype_hint = None if dtype_hint is None else tf.as_dtype(dtype_hint)\n for a in tf.nest.flatten(args_list):\n if hasattr(a, 'dtype'):\n dt = tf.as_dtype(a.dtype)\n else:\n continue\n if dtype is None:\n dtype = dt\n elif dtype != dt:\n raise TypeError('Found incompatible dtypes, {} and {}.'.format(dtype, dt))\n if dtype is None and dtype_hint is None:\n return None\n return (dtype_hint if dtype is None else dtype).as_numpy_dtype\n\n\ndef is_complex(dtype):\n \"\"\"Returns whether this is a complex floating point type.\"\"\"\n return np.issubdtype(np.dtype(dtype), np.complexfloating)\n\n\nclass _FakeModule(types.ModuleType):\n \"\"\"Dummy module which raises `NotImplementedError` on `getattr` access.\"\"\"\n\n def __init__(self, name, doc):\n self._name = name\n self._doc = doc\n types.ModuleType.__init__(self, name, doc) # pylint: disable=non-parent-init-called\n\n def __dir__(self):\n return []\n\n def __getattr__(self, attr):\n raise NotImplementedError(self._doc)\n\n\ndef try_import(name): # pylint: disable=invalid-name\n try:\n return importlib.import_module(name)\n except ImportError:\n return _FakeModule(name, 'Error loading module \"{}\".'.format(name))\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Quasi Monte Carlo support: Halton sequence.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\n\n\n__all__ = [\n 'sample_halton_sequence',\n]\n\n\n# The maximum dimension we support. This is limited by the number of primes\n# in the _PRIMES array.\n_MAX_DIMENSION = 1000\n\n\ndef sample_halton_sequence(dim,\n num_results=None,\n sequence_indices=None,\n dtype=tf.float32,\n randomized=True,\n seed=None,\n name=None):\n r\"\"\"Returns a sample from the `dim` dimensional Halton sequence.\n\n Warning: The sequence elements take values only between 0 and 1. Care must be\n taken to appropriately transform the domain of a function if it differs from\n the unit cube before evaluating integrals using Halton samples. 
It is also\n important to remember that quasi-random numbers without randomization are not\n a replacement for pseudo-random numbers in every context. Quasi random numbers\n are completely deterministic and typically have significant negative\n autocorrelation unless randomization is used.\n\n Computes the members of the low discrepancy Halton sequence in dimension\n `dim`. The `dim`-dimensional sequence takes values in the unit hypercube in\n `dim` dimensions. Currently, only dimensions up to 1000 are supported. The\n prime base for the k-th axes is the k-th prime starting from 2. For example,\n if `dim` = 3, then the bases will be [2, 3, 5] respectively and the first\n element of the non-randomized sequence will be: [0.5, 0.333, 0.2]. For a more\n complete description of the Halton sequences see\n [here](https://en.wikipedia.org/wiki/Halton_sequence). For low discrepancy\n sequences and their applications see\n [here](https://en.wikipedia.org/wiki/Low-discrepancy_sequence).\n\n If `randomized` is true, this function produces a scrambled version of the\n Halton sequence introduced by [Owen (2017)][1]. For the advantages of\n randomization of low discrepancy sequences see [here](\n https://en.wikipedia.org/wiki/Quasi-Monte_Carlo_method#Randomization_of_quasi-Monte_Carlo).\n\n The number of samples produced is controlled by the `num_results` and\n `sequence_indices` parameters. The user must supply either `num_results` or\n `sequence_indices` but not both.\n The former is the number of samples to produce starting from the first\n element. If `sequence_indices` is given instead, the specified elements of\n the sequence are generated. For example, sequence_indices=tf.range(10) is\n equivalent to specifying n=10.\n\n #### Examples\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n # Produce the first 1000 members of the Halton sequence in 3 dimensions.\n num_results = 1000\n dim = 3\n sample = tfp.mcmc.sample_halton_sequence(\n dim,\n num_results=num_results,\n seed=127)\n\n # Evaluate the integral of x_1 * x_2^2 * x_3^3 over the three dimensional\n # hypercube.\n powers = tf.range(1.0, limit=dim + 1)\n integral = tf.reduce_mean(tf.reduce_prod(sample ** powers, axis=-1))\n true_value = 1.0 / tf.reduce_prod(powers + 1.0)\n with tf.Session() as session:\n values = session.run((integral, true_value))\n\n # Produces a relative absolute error of 1.7%.\n print (\"Estimated: %f, True Value: %f\" % values)\n\n # Now skip the first 1000 samples and recompute the integral with the next\n # thousand samples. The sequence_indices argument can be used to do this.\n\n\n sequence_indices = tf.range(start=1000, limit=1000 + num_results,\n dtype=tf.int32)\n sample_leaped = tfp.mcmc.sample_halton_sequence(\n dim,\n sequence_indices=sequence_indices,\n seed=111217)\n\n integral_leaped = tf.reduce_mean(tf.reduce_prod(sample_leaped ** powers,\n axis=-1))\n with tf.Session() as session:\n values = session.run((integral_leaped, true_value))\n # Now produces a relative absolute error of 0.05%.\n print (\"Leaped Estimated: %f, True Value: %f\" % values)\n ```\n\n Args:\n dim: Positive Python `int` representing each sample's `event_size.` Must\n not be greater than 1000.\n num_results: (Optional) Positive scalar `Tensor` of dtype int32. The number\n of samples to generate. Either this parameter or sequence_indices must\n be specified but not both. 
If this parameter is None, then the behaviour\n is determined by the `sequence_indices`.\n Default value: `None`.\n sequence_indices: (Optional) `Tensor` of dtype int32 and rank 1. The\n elements of the sequence to compute specified by their position in the\n sequence. The entries index into the Halton sequence starting with 0 and\n hence, must be whole numbers. For example, sequence_indices=[0, 5, 6] will\n produce the first, sixth and seventh elements of the sequence. If this\n parameter is None, then the `num_results` parameter must be specified\n which gives the number of desired samples starting from the first sample.\n Default value: `None`.\n dtype: (Optional) The dtype of the sample. One of: `float16`, `float32` or\n `float64`.\n Default value: `tf.float32`.\n randomized: (Optional) bool indicating whether to produce a randomized\n Halton sequence. If True, applies the randomization described in\n [Owen (2017)][1].\n Default value: `True`.\n seed: (Optional) Python integer to seed the random number generator. Only\n used if `randomized` is True. If not supplied and `randomized` is True,\n no seed is set.\n Default value: `None`.\n name: (Optional) Python `str` describing ops managed by this function. If\n not supplied the name of this function is used.\n Default value: \"sample_halton_sequence\".\n\n Returns:\n halton_elements: Elements of the Halton sequence. `Tensor` of supplied dtype\n and `shape` `[num_results, dim]` if `num_results` was specified or shape\n `[s, dim]` where s is the size of `sequence_indices` if `sequence_indices`\n were specified.\n\n Raises:\n ValueError: if both `sequence_indices` and `num_results` were specified or\n if dimension `dim` is less than 1 or greater than 1000.\n\n #### References\n\n [1]: Art B. Owen. A randomized Halton algorithm in R. _arXiv preprint\n arXiv:1706.02808_, 2017. https://arxiv.org/abs/1706.02808\n \"\"\"\n if dim < 1 or dim > _MAX_DIMENSION:\n raise ValueError(\n 'Dimension must be between 1 and {}. Supplied {}'.format(_MAX_DIMENSION,\n dim))\n if (num_results is None) == (sequence_indices is None):\n raise ValueError('Either `num_results` or `sequence_indices` must be'\n ' specified but not both.')\n\n if not dtype.is_floating:\n raise ValueError('dtype must be of `float`-type')\n\n with tf.compat.v1.name_scope(\n name, 'sample', values=[num_results, sequence_indices]):\n # Here and in the following, the shape layout is as follows:\n # [sample dimension, event dimension, coefficient dimension].\n # The coefficient dimension is an intermediate axes which will hold the\n # weights of the starting integer when expressed in the (prime) base for\n # an event dimension.\n if num_results is not None:\n num_results = tf.convert_to_tensor(value=num_results)\n if sequence_indices is not None:\n sequence_indices = tf.convert_to_tensor(value=sequence_indices)\n indices = _get_indices(num_results, sequence_indices, dtype)\n radixes = tf.constant(_PRIMES[0:dim], dtype=dtype, shape=[dim, 1])\n\n max_sizes_by_axes = _base_expansion_size(\n tf.reduce_max(input_tensor=indices), radixes)\n\n max_size = tf.reduce_max(input_tensor=max_sizes_by_axes)\n\n # The powers of the radixes that we will need. Note that there is a bit\n # of an excess here. Suppose we need the place value coefficients of 7\n # in base 2 and 3. For 2, we will have 3 digits but we only need 2 digits\n # for base 3. However, we can only create rectangular tensors so we\n # store both expansions in a [2, 3] tensor. 
This leads to the problem that\n # we might end up attempting to raise large numbers to large powers. For\n # example, base 2 expansion of 1024 has 10 digits. If we were in 10\n # dimensions, then the 10th prime (29) we will end up computing 29^10 even\n # though we don't need it. We avoid this by setting the exponents for each\n # axes to 0 beyond the maximum value needed for that dimension.\n exponents_by_axes = tf.tile([tf.range(max_size)], [dim, 1])\n\n # The mask is true for those coefficients that are irrelevant.\n weight_mask = exponents_by_axes >= max_sizes_by_axes\n capped_exponents = tf.compat.v1.where(weight_mask,\n tf.zeros_like(exponents_by_axes),\n exponents_by_axes)\n weights = radixes ** capped_exponents\n # The following computes the base b expansion of the indices. Suppose,\n # x = a0 + a1*b + a2*b^2 + ... Then, performing a floor div of x with\n # the vector (1, b, b^2, b^3, ...) will produce\n # (a0 + s1 * b, a1 + s2 * b, ...) where s_i are coefficients we don't care\n # about. Noting that all a_i < b by definition of place value expansion,\n # we see that taking the elements mod b of the above vector produces the\n # place value expansion coefficients.\n coeffs = tf.math.floordiv(indices, weights)\n coeffs *= 1. - tf.cast(weight_mask, dtype)\n coeffs %= radixes\n if not randomized:\n coeffs /= radixes\n return tf.reduce_sum(input_tensor=coeffs / weights, axis=-1)\n stream = SeedStream(seed, salt='MCMCSampleHaltonSequence')\n coeffs = _randomize(coeffs, radixes, seed=stream())\n # Remove the contribution from randomizing the trailing zero for the\n # axes where max_size_by_axes < max_size. This will be accounted\n # for separately below (using zero_correction).\n coeffs *= 1. - tf.cast(weight_mask, dtype)\n coeffs /= radixes\n base_values = tf.reduce_sum(input_tensor=coeffs / weights, axis=-1)\n\n # The randomization used in Owen (2017) does not leave 0 invariant. While\n # we have accounted for the randomization of the first `max_size_by_axes`\n # coefficients, we still need to correct for the trailing zeros. Luckily,\n # this is equivalent to adding a uniform random value scaled so the first\n # `max_size_by_axes` coefficients are zero. The following statements perform\n # this correction.\n zero_correction = tf.random.uniform([dim, 1], seed=stream(), dtype=dtype)\n zero_correction /= radixes ** max_sizes_by_axes\n return base_values + tf.reshape(zero_correction, [-1])\n\n\ndef _randomize(coeffs, radixes, seed=None):\n \"\"\"Applies the Owen (2017) randomization to the coefficients.\"\"\"\n given_dtype = coeffs.dtype\n coeffs = tf.cast(coeffs, dtype=tf.int32)\n num_coeffs = tf.shape(input=coeffs)[-1]\n radixes = tf.reshape(tf.cast(radixes, dtype=tf.int32), shape=[-1])\n stream = SeedStream(seed, salt='MCMCSampleHaltonSequence2')\n perms = _get_permutations(num_coeffs, radixes, seed=stream())\n perms = tf.reshape(perms, shape=[-1])\n radix_sum = tf.reduce_sum(input_tensor=radixes)\n radix_offsets = tf.reshape(tf.cumsum(radixes, exclusive=True),\n shape=[-1, 1])\n offsets = radix_offsets + tf.range(num_coeffs) * radix_sum\n permuted_coeffs = tf.gather(perms, coeffs + offsets)\n return tf.cast(permuted_coeffs, dtype=given_dtype)\n\n\ndef _get_permutations(num_results, dims, seed=None):\n \"\"\"Uniform iid sample from the space of permutations.\n\n Draws a sample of size `num_results` from the group of permutations of degrees\n specified by the `dims` tensor. 
These are packed together into one tensor\n such that each row is one sample from each of the dimensions in `dims`. For\n example, if dims = [2,3] and num_results = 2, the result is a tensor of shape\n [2, 2 + 3] and the first row of the result might look like:\n [1, 0, 2, 0, 1]. The first two elements are a permutation over 2 elements\n while the next three are a permutation over 3 elements.\n\n Args:\n num_results: A positive scalar `Tensor` of integral type. The number of\n draws from the discrete uniform distribution over the permutation groups.\n dims: A 1D `Tensor` of the same dtype as `num_results`. The degree of the\n permutation groups from which to sample.\n seed: (Optional) Python integer to seed the random number generator.\n\n Returns:\n permutations: A `Tensor` of shape `[num_results, sum(dims)]` and the same\n dtype as `dims`.\n \"\"\"\n sample_range = tf.range(num_results)\n stream = SeedStream(seed, salt='MCMCSampleHaltonSequence3')\n\n def generate_one(d):\n seed = stream()\n fn = lambda _: tf.random.shuffle(tf.range(d), seed=seed)\n return tf.map_fn(\n fn,\n sample_range,\n parallel_iterations=1 if seed is not None else 10)\n return tf.concat([generate_one(d) for d in tf.unstack(dims)],\n axis=-1)\n\n\ndef _get_indices(num_results, sequence_indices, dtype, name=None):\n \"\"\"Generates starting points for the Halton sequence procedure.\n\n The k'th element of the sequence is generated starting from a positive integer\n which must be distinct for each `k`. It is conventional to choose the starting\n point as `k` itself (or `k+1` if k is zero based). This function generates\n the starting integers for the required elements and reshapes the result for\n later use.\n\n Args:\n num_results: Positive scalar `Tensor` of dtype int32. The number of samples\n to generate. If this parameter is supplied, then `sequence_indices`\n should be None.\n sequence_indices: `Tensor` of dtype int32 and rank 1. The entries\n index into the Halton sequence starting with 0 and hence, must be whole\n numbers. For example, sequence_indices=[0, 5, 6] will produce the first,\n sixth and seventh elements of the sequence. If this parameter is not None\n then `n` must be None.\n dtype: The dtype of the sample. One of `float32` or `float64`.\n Default is `float32`.\n name: Python `str` name which describes ops created by this function.\n\n Returns:\n indices: `Tensor` of dtype `dtype` and shape = `[n, 1, 1]`.\n \"\"\"\n with tf.compat.v1.name_scope(name, '_get_indices',\n [num_results, sequence_indices]):\n if sequence_indices is None:\n num_results = tf.cast(num_results, dtype=dtype)\n sequence_indices = tf.range(num_results, dtype=dtype)\n else:\n sequence_indices = tf.cast(sequence_indices, dtype)\n\n # Shift the indices so they are 1 based.\n indices = sequence_indices + 1\n\n # Reshape to make space for the event dimension and the place value\n # coefficients.\n return tf.reshape(indices, [-1, 1, 1])\n\n\ndef _base_expansion_size(num, bases):\n \"\"\"Computes the number of terms in the place value expansion.\n\n Let num = a0 + a1 b + a2 b^2 + ... ak b^k be the place value expansion of\n `num` in base b (ak <> 0). This function computes and returns `k+1` for each\n base `b` specified in `bases`.\n\n This can be inferred from the base `b` logarithm of `num` as follows:\n $$k = Floor(log_b (num)) + 1 = Floor( log(num) / log(b)) + 1$$\n\n Args:\n num: Scalar `Tensor` of dtype either `float32` or `float64`. The number to\n compute the base expansion size of.\n bases: `Tensor` of the same dtype as num. 
The bases to compute the size\n against.\n\n Returns:\n Tensor of same dtype and shape as `bases` containing the size of num when\n written in that base.\n \"\"\"\n return tf.floor(tf.math.log(num) / tf.math.log(bases)) + 1\n\n\ndef _primes_less_than(n):\n # Based on\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n \"\"\"Returns sorted array of primes such that `2 <= prime < n`.\"\"\"\n small_primes = np.array((2, 3, 5))\n if n <= 6:\n return small_primes[small_primes < n]\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=np.bool)\n sieve[0] = False\n m = int(n ** 0.5) // 3 + 1\n for i in range(m):\n if not sieve[i]:\n continue\n k = 3 * i + 1 | 1\n sieve[k ** 2 // 3::2 * k] = False\n sieve[(k ** 2 + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = False\n return np.r_[2, 3, 3 * np.nonzero(sieve)[0] + 1 | 1]\n\n_PRIMES = _primes_less_than(7919 + 1)\n\n\nassert len(_PRIMES) == _MAX_DIMENSION\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Smooth Seasonal model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import dtype_util\n\nfrom tensorflow_probability.python.sts.internal import util as sts_util\nfrom tensorflow_probability.python.sts.structural_time_series import Parameter\nfrom tensorflow_probability.python.sts.structural_time_series import StructuralTimeSeries\n\n\nclass SmoothSeasonalStateSpaceModel(tfd.LinearGaussianStateSpaceModel):\n \"\"\"State space model for a smooth seasonal effect.\n\n A state space model (SSM) posits a set of latent (unobserved) variables that\n evolve over time with dynamics specified by a probabilistic transition model\n `p(z[t+1] | z[t])`. At each timestep, we observe a value sampled from an\n observation model conditioned on the current state, `p(x[t] | z[t])`. The\n special case where both the transition and observation models are Gaussians\n with mean specified as a linear function of the inputs, is known as a linear\n Gaussian state space model and supports tractable exact probabilistic\n calculations; see `tfp.distributions.LinearGaussianStateSpaceModel` for\n details.\n\n A smooth seasonal effect model is a special case of a linear Gaussian SSM. It\n is the sum of a set of \"cyclic\" components, with one component for each\n frequency:\n\n ```python\n frequencies[j] = 2. * pi * frequency_multipliers[j] / period\n ```\n\n Each cyclic component contains two latent states which we denote `effect` and\n `auxiliary`. 
The two latent states for component `j` drift over time via:\n\n ```python\n effect[t] = (effect[t-1] * cos(frequencies[j]) +\n auxiliary[t-] * sin(frequencies[j]) +\n Normal(0., drift_scale))\n\n auxiliary[t] = (-effect[t-1] * sin(frequencies[j]) +\n auxiliary[t-] * cos(frequencies[j]) +\n Normal(0., drift_scale))\n ```\n\n The `auxiliary` latent state only appears as a matter of construction and thus\n its interpretation is not particularly important. The total smooth seasonal\n effect is the sum of the `effect` values from each of the cyclic components.\n\n The parameters `drift_scale` and `observation_noise_scale` are each (a batch\n of) scalars. The batch shape of this `Distribution` is the broadcast batch\n shape of these parameters and of the `initial_state_prior`.\n\n #### Mathematical Details\n\n The smooth seasonal effect model implements a\n `tfp.distributions.LinearGaussianStateSpaceModel` with `latent_size = 2 *\n len(frequency_multipliers)` and `observation_size = 1`. The latent state is\n the concatenation of the cyclic latent states which themselves comprise an\n `effect` and an `auxiliary` state. The transition matrix is a block diagonal\n matrix where block `j` is:\n\n ```python\n transition_matrix[j] = [[cos(frequencies[j]), sin(frequencies[j])],\n [-sin(frequencies[j]), cos(frequencies[j])]]\n ```\n\n The observation model picks out the cyclic `effect` values from the latent\n state:\n\n ```\n observation_matrix = [[1., 0., 1., 0., ..., 1., 0.]]\n observation_noise ~ Normal(loc=0, scale=observation_noise_scale)\n ```\n\n For further mathematical details please see [1].\n\n #### Examples\n\n A state space model with smooth daily seasonality on hourly data. In other\n words, each day there is a pattern which broadly repeats itself over the\n course of the day and doesn't change too much from one hour to the next. Four\n random samples from such a model can be obtained via:\n\n ```python\n from matplotlib import pylab as plt\n\n ssm = SmoothSeasonalStateSpaceModel(\n num_timesteps=100,\n period=24,\n frequency_multipliers=[1, 4],\n drift_scale=0.1,\n initial_state_prior=tfd.MultivariateNormalDiag(\n scale_diag=tf.fill([4], 2.0)),\n )\n\n fig, axes = plt.subplots(4)\n\n series = ssm.sample(4)\n\n for series, ax in zip(series[..., 0], axes):\n ax.set_xticks(tf.range(ssm.num_timesteps, delta=ssm.period))\n ax.grid()\n ax.plot(series)\n\n plt.show()\n ```\n\n A comparison of the above with a comparable `Seasonal` component gives an\n example of the difference between these two components:\n\n ```python\n ssm = SeasonalStateSpaceModel(\n num_timesteps=100,\n num_seasons=24,\n num_steps_per_season=1,\n drift_scale=0.1,\n initial_state_prior=tfd.MultivariateNormalDiag(\n scale_diag=tf.fill([24], 2.0)),\n )\n ```\n\n #### References\n\n [1]: Harvey, A. Forecasting, Structural Time Series Models and the Kalman\n Filter. 
Cambridge: Cambridge University Press, 1990.\n\n \"\"\"\n\n def __init__(self,\n num_timesteps,\n period,\n frequency_multipliers,\n drift_scale,\n initial_state_prior,\n observation_noise_scale=0.,\n initial_step=0,\n validate_args=False,\n allow_nan_stats=True,\n name=None):\n \"\"\"Build a smooth seasonal state space model.\n\n Args:\n num_timesteps: Scalar `int` `Tensor` number of timesteps to model\n with this distribution.\n period: positive scalar `float` `Tensor` giving the number of timesteps\n required for the longest cyclic effect to repeat.\n frequency_multipliers: One-dimensional `float` `Tensor` listing the\n frequencies (cyclic components) included in the model, as multipliers of\n the base/fundamental frequency `2. * pi / period`. Each component is\n specified by the number of times it repeats per period, and adds two\n latent dimensions to the model. A smooth seasonal model that can\n represent any periodic function is given by `frequency_multipliers = [1,\n 2, ..., floor(period / 2)]`. However, it is often desirable to enforce a\n smoothness assumption (and reduce the computational burden) by dropping\n some of the higher frequencies.\n drift_scale: Scalar (any additional dimensions are treated as batch\n dimensions) `float` `Tensor` indicating the standard deviation of the\n latent state transitions.\n initial_state_prior: instance of `tfd.MultivariateNormal`\n representing the prior distribution on latent states. Must have\n event shape `[num_features]`.\n observation_noise_scale: Scalar (any additional dimensions are\n treated as batch dimensions) `float` `Tensor` indicating the standard\n deviation of the observation noise.\n Default value: `0.`.\n initial_step: scalar `int` `Tensor` specifying the starting timestep.\n Default value: `0`.\n validate_args: Python `bool`. Whether to validate input with asserts. If\n `validate_args` is `False`, and the inputs are invalid, correct behavior\n is not guaranteed.\n Default value: `False`.\n allow_nan_stats: Python `bool`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. 
If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n Default value: `True`.\n name: Python `str` name prefixed to ops created by this class.\n Default value: 'SmoothSeasonalStateSpaceModel'.\n\n \"\"\"\n\n with tf.name_scope(name or 'SmoothSeasonalStateSpaceModel') as name:\n\n dtype = dtype_util.common_dtype(\n [period, frequency_multipliers, drift_scale, initial_state_prior])\n\n period = tf.convert_to_tensor(\n value=period, name='period', dtype=dtype)\n\n frequency_multipliers = tf.convert_to_tensor(\n value=frequency_multipliers,\n name='frequency_multipliers',\n dtype=dtype)\n\n drift_scale = tf.convert_to_tensor(\n value=drift_scale, name='drift_scale', dtype=dtype)\n\n observation_noise_scale = tf.convert_to_tensor(\n value=observation_noise_scale,\n name='observation_noise_scale',\n dtype=dtype)\n\n num_frequencies = static_num_frequencies(frequency_multipliers)\n\n observation_matrix = tf.tile(\n input=tf.constant([[1., 0.]], dtype=dtype),\n multiples=[1, num_frequencies])\n\n transition_matrix = build_smooth_seasonal_transition_matrix(\n period=period,\n frequency_multipliers=frequency_multipliers,\n dtype=dtype)\n\n self._drift_scale = drift_scale\n self._observation_noise_scale = observation_noise_scale\n self._period = period\n self._frequency_multipliers = frequency_multipliers\n\n super(SmoothSeasonalStateSpaceModel, self).__init__(\n num_timesteps=num_timesteps,\n transition_matrix=transition_matrix,\n transition_noise=tfd.MultivariateNormalDiag(\n scale_diag=(drift_scale[..., tf.newaxis] *\n tf.ones([2 * num_frequencies], dtype=dtype)),\n name='transition_noise'),\n observation_matrix=observation_matrix,\n observation_noise=tfd.MultivariateNormalDiag(\n scale_diag=observation_noise_scale[..., tf.newaxis],\n name='observation_noise'),\n initial_state_prior=initial_state_prior,\n initial_step=initial_step,\n allow_nan_stats=allow_nan_stats,\n validate_args=validate_args,\n name=name)\n\n @property\n def drift_scale(self):\n \"\"\"Standard deviation of the drift in the cyclic effects.\"\"\"\n return self._drift_scale\n\n @property\n def observation_noise_scale(self):\n \"\"\"Standard deviation of the observation noise.\"\"\"\n return self._observation_noise_scale\n\n @property\n def period(self):\n \"\"\"The seasonal period.\"\"\"\n return self._period\n\n @property\n def frequency_multipliers(self):\n \"\"\"Multipliers of the fundamental frequency.\"\"\"\n return self._frequency_multipliers\n\n\ndef build_smooth_seasonal_transition_matrix(period,\n frequency_multipliers,\n dtype):\n \"\"\"Build the transition matrix for a SmoothSeasonalStateSpaceModel.\"\"\"\n\n two_pi = tf.constant(2. * np.pi, dtype=dtype)\n frequencies = two_pi * frequency_multipliers / period\n num_frequencies = static_num_frequencies(frequency_multipliers)\n\n sin_frequencies = tf.sin(frequencies)\n cos_frequencies = tf.cos(frequencies)\n\n trigonometric_values = tf.stack(\n [cos_frequencies, sin_frequencies, -sin_frequencies, cos_frequencies],\n axis=-1)\n\n transition_matrix = tf.linalg.LinearOperatorBlockDiag(\n [tf.linalg.LinearOperatorFullMatrix(\n matrix=tf.reshape(trigonometric_values[i], [2, 2]),\n is_square=True) for i in range(num_frequencies)]\n )\n\n return transition_matrix\n\n\ndef static_num_frequencies(frequency_multipliers):\n \"\"\"Statically known number of frequencies. 
Raises if not possible.\"\"\"\n\n frequency_multipliers = tf.convert_to_tensor(\n frequency_multipliers, name='frequency_multipliers')\n\n num_frequencies = tf.compat.dimension_value(\n dimension=frequency_multipliers.shape[0])\n\n if num_frequencies is None:\n raise ValueError('The number of frequencies must be statically known. Saw '\n '`frequency_multipliers` with shape {}'.format(\n frequency_multipliers.shape))\n\n return num_frequencies\n\n\nclass SmoothSeasonal(StructuralTimeSeries):\n \"\"\"Formal representation of a smooth seasonal effect model.\n\n The smooth seasonal model uses a set of trigonometric terms in order to\n capture a recurring pattern whereby adjacent (in time) effects are\n similar. The model uses `frequencies` calculated via:\n\n ```python\n frequencies[j] = 2. * pi * frequency_multipliers[j] / period\n ```\n\n and then posits two latent states for each `frequency`. The two latent states\n associated with frequency `j` drift over time via:\n\n ```python\n effect[t] = (effect[t-1] * cos(frequencies[j]) +\n auxiliary[t-] * sin(frequencies[j]) +\n Normal(0., drift_scale))\n\n auxiliary[t] = (-effect[t-1] * sin(frequencies[j]) +\n auxiliary[t-] * cos(frequencies[j]) +\n Normal(0., drift_scale))\n ```\n\n where `effect` is the smooth seasonal effect and `auxiliary` only appears as a\n matter of construction. The interpretation of `auxiliary` is thus not\n particularly important.\n\n #### Examples\n\n A smooth seasonal effect model representing smooth weekly seasonality on daily\n data:\n\n ```python\n component = SmoothSeasonal(\n period=7,\n frequency_multipliers=[1, 2, 3],\n initial_state_prior=tfd.MultivariateNormalDiag(scale_diag=tf.ones([6])),\n )\n ```\n\n \"\"\"\n\n def __init__(self,\n period,\n frequency_multipliers,\n drift_scale_prior=None,\n initial_state_prior=None,\n observed_time_series=None,\n name=None):\n \"\"\"Specify a smooth seasonal effects model.\n\n Args:\n period: positive scalar `float` `Tensor` giving the number of timesteps\n required for the longest cyclic effect to repeat.\n frequency_multipliers: One-dimensional `float` `Tensor` listing the\n frequencies (cyclic components) included in the model, as multipliers of\n the base/fundamental frequency `2. * pi / period`. Each component is\n specified by the number of times it repeats per period, and adds two\n latent dimensions to the model. A smooth seasonal model that can\n represent any periodic function is given by `frequency_multipliers = [1,\n 2, ..., floor(period / 2)]`. However, it is often desirable to enforce a\n smoothness assumption (and reduce the computational burden) by dropping\n some of the higher frequencies.\n drift_scale_prior: optional `tfd.Distribution` instance specifying a prior\n on the `drift_scale` parameter. If `None`, a heuristic default prior is\n constructed based on the provided `observed_time_series`.\n Default value: `None`.\n initial_state_prior: instance of `tfd.MultivariateNormal` representing\n the prior distribution on the latent states. Must have event shape\n `[2 * len(frequency_multipliers)]`. If `None`, a heuristic default prior\n is constructed based on the provided `observed_time_series`.\n observed_time_series: optional `float` `Tensor` of shape\n `batch_shape + [T, 1]` (omitting the trailing unit dimension is also\n supported when `T > 1`), specifying an observed time series.\n Any priors not explicitly set will be given default values according to\n the scale of the observed time series (or batch of time series). 
May\n optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes\n a mask `Tensor` to specify timesteps with missing observations.\n Default value: `None`.\n name: the name of this model component.\n Default value: 'SmoothSeasonal'.\n\n \"\"\"\n\n with tf.name_scope(name or 'SmoothSeasonal') as name:\n\n _, observed_stddev, observed_initial = (\n sts_util.empirical_statistics(observed_time_series)\n if observed_time_series is not None else (0., 1., 0.))\n\n latent_size = 2 * static_num_frequencies(frequency_multipliers)\n\n # Heuristic default priors. Overriding these may dramatically\n # change inference performance and results.\n if drift_scale_prior is None:\n drift_scale_prior = tfd.LogNormal(\n loc=tf.math.log(.01 * observed_stddev), scale=3.)\n\n if initial_state_prior is None:\n initial_state_scale = (\n tf.abs(observed_initial) + observed_stddev)[..., tf.newaxis]\n ones = tf.ones([latent_size], dtype=drift_scale_prior.dtype)\n initial_state_prior = tfd.MultivariateNormalDiag(\n scale_diag=initial_state_scale * ones)\n\n self._initial_state_prior = initial_state_prior\n self._period = period\n self._frequency_multipliers = frequency_multipliers\n\n super(SmoothSeasonal, self).__init__(\n parameters=[\n Parameter('drift_scale', drift_scale_prior,\n tfb.Chain([tfb.AffineScalar(scale=observed_stddev),\n tfb.Softplus()])),\n ],\n latent_size=latent_size,\n name=name)\n\n @property\n def period(self):\n \"\"\"The seasonal period.\"\"\"\n return self._period\n\n @property\n def frequency_multipliers(self):\n \"\"\"Multipliers of the fundamental frequency.\"\"\"\n return self._frequency_multipliers\n\n @property\n def initial_state_prior(self):\n \"\"\"Prior distribution on the initial latent states.\"\"\"\n return self._initial_state_prior\n\n def _make_state_space_model(self,\n num_timesteps,\n param_map,\n initial_state_prior=None,\n initial_step=0):\n\n if initial_state_prior is None:\n initial_state_prior = self.initial_state_prior\n\n return SmoothSeasonalStateSpaceModel(\n num_timesteps=num_timesteps,\n period=self.period,\n frequency_multipliers=self.frequency_multipliers,\n initial_state_prior=initial_state_prior,\n initial_step=initial_step,\n **param_map)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests of the allocation strategy optimization pass.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.experimental.auto_batching import allocation_strategy\nfrom tensorflow_probability.python.experimental.auto_batching import instructions as inst\nfrom tensorflow_probability.python.experimental.auto_batching import test_programs\n\n\ndef strip_pop_ops(program):\n # Why might this be useful? 
Because a variable waiting to be popped by an\n # explicit PopOp registers as \"live\" to the liveness analysis, which causes it\n # to demand a heavier allocation strategy than it actually deserves. For an\n # example of the difference this can make, compare the answers in\n # `testAllocatingIsEvenProgram` and `testAllocatingIsEvenProgramNoPops`.\n def walk_graph(graph):\n for i in range(graph.exit_index()):\n block = graph.block(i)\n block.instructions = [op for op in block.instructions\n if not isinstance(op, inst.PopOp)]\n walk_graph(program.graph)\n for func in program.functions:\n walk_graph(func.graph)\n\n\nclass AllocationStrategyTest(tf.test.TestCase):\n\n def assertAllocates(self, expected, prog):\n allocated = allocation_strategy.optimize(prog)\n self.assertEqual(expected, allocated.var_alloc)\n\n def testAllocatingConstantProgram(self):\n prog = test_programs.constant_program()\n answer = {inst.pc_var: inst.VariableAllocation.REGISTER,\n 'answer': inst.VariableAllocation.REGISTER}\n self.assertAllocates(answer, prog)\n\n def testAllocatingIfProgram(self):\n prog = test_programs.single_if_program()\n answer = {inst.pc_var: inst.VariableAllocation.REGISTER,\n 'answer': inst.VariableAllocation.REGISTER,\n 'cond': inst.VariableAllocation.REGISTER,\n 'input': inst.VariableAllocation.REGISTER}\n self.assertAllocates(answer, prog)\n\n def testAllocatingIsEvenProgram(self):\n prog = test_programs.is_even_function_calls()\n answer = {inst.pc_var: inst.VariableAllocation.FULL,\n 'ans': inst.VariableAllocation.REGISTER,\n 'cond': inst.VariableAllocation.REGISTER,\n 'n': inst.VariableAllocation.REGISTER,\n 'n1': inst.VariableAllocation.REGISTER,\n 'nm1': inst.VariableAllocation.FULL}\n self.assertAllocates(answer, prog)\n\n def testAllocatingIsEvenProgramNoPops(self):\n prog = test_programs.is_even_function_calls()\n strip_pop_ops(prog)\n answer = {inst.pc_var: inst.VariableAllocation.FULL,\n 'ans': inst.VariableAllocation.REGISTER,\n 'cond': inst.VariableAllocation.REGISTER,\n 'n': inst.VariableAllocation.REGISTER,\n 'n1': inst.VariableAllocation.REGISTER,\n 'nm1': inst.VariableAllocation.TEMPORARY}\n self.assertAllocates(answer, prog)\n\n def testAllocatingFibonacciProgram(self):\n prog = test_programs.fibonacci_function_calls()\n answer = {inst.pc_var: inst.VariableAllocation.FULL,\n 'ans': inst.VariableAllocation.REGISTER,\n 'cond': inst.VariableAllocation.REGISTER,\n 'fibm1': inst.VariableAllocation.FULL,\n 'fibm2': inst.VariableAllocation.TEMPORARY,\n 'n': inst.VariableAllocation.FULL,\n 'n1': inst.VariableAllocation.REGISTER,\n 'nm1': inst.VariableAllocation.TEMPORARY,\n 'nm2': inst.VariableAllocation.TEMPORARY}\n self.assertAllocates(answer, prog)\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for multivariate von Mises-Fisher distribution.\"\"\"\n\nfrom __future__ import 
absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nfrom scipy import special as sp_special\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.distributions.von_mises_fisher import _bessel_ive\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.internal import test_util as tfp_test_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass VonMisesFisherTest(tfp_test_util.VectorDistributionTestHelpers,\n tf.test.TestCase):\n\n def testBesselIve(self):\n self.assertRaises(ValueError, lambda: _bessel_ive(2.0, 1.0))\n # Zero is not a supported value for z.\n self.assertRaises(tf.errors.InvalidArgumentError,\n lambda: self.evaluate(_bessel_ive(1.5, 0.0)))\n z = np.logspace(-6, 2, 20).astype(np.float64)\n for v in np.float64([-0.5, 0, 0.5, 1, 1.5]):\n self.assertAllClose(sp_special.ive(v, z), _bessel_ive(v, z))\n\n def testSampleMeanDir2d(self):\n mean_dirs = tf.math.l2_normalize([[1., 1], [-2, 1], [0, -1]], axis=-1)\n concentration = [[0], [0.1], [2], [40], [1000]]\n vmf = tfp.distributions.VonMisesFisher(\n mean_direction=mean_dirs,\n concentration=concentration,\n validate_args=True,\n allow_nan_stats=False)\n self.assertEqual([5, 3], tensorshape_util.as_list(vmf.batch_shape))\n self.assertEqual([2], tensorshape_util.as_list(vmf.event_shape))\n nsamples = 12000\n samples = vmf.sample(\n sample_shape=[nsamples], seed=tfp_test_util.test_seed())\n self.assertEqual([nsamples, 5, 3, 2],\n tensorshape_util.as_list(samples.shape))\n sample_mean = self.evaluate(samples).mean(axis=0)\n # Assert that positive-concentration distributions have samples with\n # the expected mean direction.\n sample_dir = (\n sample_mean / np.linalg.norm(sample_mean, axis=-1, keepdims=True))\n inner_product = self.evaluate(\n tf.reduce_sum(input_tensor=sample_dir * vmf.mean_direction, axis=-1))\n # All except the 0-concentration distribution should have >0 inner product\n # with the mean direction of the distribution.\n self.assertAllGreater(inner_product[1:], 0.1)\n # Pick out >1 concentration distributions to assert ~1 inner product with\n # mean direction.\n self.assertAllClose(np.ones_like(inner_product)[2:], inner_product[2:],\n atol=1e-3)\n # Inner products should be roughly ascending by concentration.\n self.assertAllEqual(np.round(np.sort(inner_product, axis=0), decimals=3),\n np.round(inner_product, decimals=3))\n means = self.evaluate(vmf.mean())\n # Mean vector for 0-concentration is precisely (0, 0).\n self.assertAllEqual(np.zeros_like(means[0]), means[0])\n mean_lengths = np.linalg.norm(means, axis=-1)\n # Length of the mean vector is strictly ascending with concentration.\n self.assertAllEqual(mean_lengths, np.sort(mean_lengths, axis=0))\n self.assertAllClose(np.linalg.norm(sample_mean, axis=-1), mean_lengths,\n atol=0.03)\n\n def testSampleMeanDir3d(self):\n mean_dir = tf.math.l2_normalize([[1., 2, 3], [-2, -3, -1]], axis=-1)\n concentration = [[0], [0.1], [2], [40], [1000]]\n vmf = tfp.distributions.VonMisesFisher(\n mean_direction=mean_dir,\n concentration=concentration,\n validate_args=True,\n allow_nan_stats=False)\n self.assertEqual([5, 2], tensorshape_util.as_list(vmf.batch_shape))\n self.assertEqual([3], tensorshape_util.as_list(vmf.event_shape))\n 
nsamples = int(2e4)\n samples = vmf.sample(\n sample_shape=[nsamples], seed=tfp_test_util.test_seed())\n self.assertEqual([nsamples, 5, 2, 3],\n tensorshape_util.as_list(samples.shape))\n sample_mean = self.evaluate(samples).mean(axis=0)\n # Assert that positive-concentration distributions have samples with\n # the expected mean direction.\n sample_dir = (\n sample_mean / np.linalg.norm(sample_mean, axis=-1, keepdims=True))\n inner_product = self.evaluate(\n tf.reduce_sum(input_tensor=sample_dir * vmf.mean_direction, axis=-1))\n # All except the 0-concentration distribution should have >0 inner product\n # with the mean direction of the distribution.\n self.assertAllGreater(inner_product[1:], 0.1)\n # Pick out >1 concentration distributions to assert ~1 inner product with\n # mean direction.\n self.assertAllClose(np.ones_like(inner_product)[2:], inner_product[2:],\n atol=1e-3)\n # Inner products should be roughly ascending by concentration.\n self.assertAllEqual(np.round(np.sort(inner_product, axis=0), decimals=3),\n np.round(inner_product, decimals=3))\n means = self.evaluate(vmf.mean())\n # Mean vector for 0-concentration is precisely (0, 0, 0).\n self.assertAllEqual(np.zeros_like(means[0]), means[0])\n mean_lengths = np.linalg.norm(means, axis=-1)\n # Length of the mean vector is strictly ascending with concentration.\n self.assertAllEqual(mean_lengths, np.sort(mean_lengths, axis=0))\n self.assertAllClose(np.linalg.norm(sample_mean, axis=-1), mean_lengths,\n atol=0.03)\n\n def _verifyPdfWithNumpy(self, vmf, atol=1e-4):\n \"\"\"Verifies log_prob evaluations with numpy/scipy.\n\n Both uniform random points and sampled points are evaluated.\n\n Args:\n vmf: A `tfp.distributions.VonMisesFisher` instance.\n atol: Absolute difference tolerable.\n \"\"\"\n dim = tf.compat.dimension_value(vmf.event_shape[-1])\n nsamples = 10\n # Sample some random points uniformly over the hypersphere using numpy.\n sample_shape = [nsamples] + tensorshape_util.as_list(\n vmf.batch_shape) + [dim]\n uniforms = np.random.randn(*sample_shape)\n uniforms /= np.linalg.norm(uniforms, axis=-1, keepdims=True)\n uniforms = uniforms.astype(dtype_util.as_numpy_dtype(vmf.dtype))\n # Concatenate in some sampled points from the distribution under test.\n vmf_samples = vmf.sample(\n sample_shape=[nsamples], seed=tfp_test_util.test_seed())\n samples = tf.concat([uniforms, vmf_samples], axis=0)\n samples = tf.debugging.check_numerics(samples, 'samples')\n samples = self.evaluate(samples)\n log_prob = vmf.log_prob(samples)\n log_prob = tf.debugging.check_numerics(log_prob, 'log_prob')\n conc = self.evaluate(vmf.concentration)\n mean_dir = self.evaluate(vmf.mean_direction)\n log_true_sphere_surface_area = (\n np.log(2) + (dim / 2) * np.log(np.pi) - sp_special.gammaln(dim / 2))\n expected = (\n conc * np.sum(samples * mean_dir, axis=-1) +\n np.where(conc > 0,\n (dim / 2 - 1) * np.log(conc) -\n (dim / 2) * np.log(2 * np.pi) -\n np.log(sp_special.ive(dim / 2 - 1, conc)) -\n np.abs(conc),\n -log_true_sphere_surface_area))\n self.assertAllClose(expected, self.evaluate(log_prob),\n atol=atol)\n\n def _verifySampleAndPdfConsistency(self, vmf, rtol=0.075):\n \"\"\"Verifies samples are consistent with the PDF using importance sampling.\n\n In particular, we verify an estimate the surface area of the n-dimensional\n hypersphere, and the surface areas of the spherical caps demarcated by\n a handful of survival rates.\n\n Args:\n vmf: A `VonMisesFisher` distribution instance.\n rtol: Relative difference tolerable.\n \"\"\"\n dim = 
tf.compat.dimension_value(vmf.event_shape[-1])\n nsamples = 50000\n samples = vmf.sample(\n sample_shape=[nsamples], seed=tfp_test_util.test_seed())\n samples = tf.debugging.check_numerics(samples, 'samples')\n log_prob = vmf.log_prob(samples)\n log_prob = tf.debugging.check_numerics(log_prob, 'log_prob')\n log_importance = -log_prob\n sphere_surface_area_estimate, samples, importance, conc = self.evaluate([\n tf.exp(\n tf.reduce_logsumexp(input_tensor=log_importance, axis=0) -\n tf.math.log(tf.cast(nsamples, dtype=tf.float32))), samples,\n tf.exp(log_importance), vmf.concentration\n ])\n true_sphere_surface_area = 2 * (np.pi)**(dim / 2) * self.evaluate(\n tf.exp(-tf.math.lgamma(dim / 2)))\n # Broadcast to correct size\n true_sphere_surface_area += np.zeros_like(sphere_surface_area_estimate)\n # Highly concentrated distributions do not get enough coverage to provide\n # a reasonable full-sphere surface area estimate. These are covered below\n # by CDF-based hypersphere cap surface area estimates.\n self.assertAllClose(\n true_sphere_surface_area[np.where(conc < 3)],\n sphere_surface_area_estimate[np.where(conc < 3)],\n rtol=rtol)\n\n # Assert surface area of hyperspherical cap For some CDFs in [.05,.45],\n # (h must be greater than 0 for the hypersphere cap surface area\n # calculation to hold).\n for survival_rate in 0.95, .9, .75, .6:\n cdf = (1 - survival_rate)\n mean_dir = self.evaluate(vmf.mean_direction)\n dotprods = np.sum(samples * mean_dir, -1)\n # Empirical estimate of the effective dot-product of the threshold that\n # selects for a given CDF level, that is the cosine of the largest\n # passable angle, or the minimum cosine for a within-CDF sample.\n dotprod_thresh = np.percentile(\n dotprods, 100 * survival_rate, axis=0, keepdims=True)\n dotprod_above_thresh = np.float32(dotprods > dotprod_thresh)\n sphere_cap_surface_area_ests = (\n cdf * (importance * dotprod_above_thresh).sum(0) /\n dotprod_above_thresh.sum(0))\n h = (1 - dotprod_thresh)\n self.assertGreaterEqual(h.min(), 0) # h must be >= 0 for the eqn below\n true_sphere_cap_surface_area = (\n 0.5 * true_sphere_surface_area *\n self.evaluate(tf.math.betainc((dim - 1) / 2, 0.5, 2 * h - h**2)))\n if dim == 3: # For 3-d we have a simpler form we can double-check.\n self.assertAllClose(2 * np.pi * h, true_sphere_cap_surface_area)\n\n self.assertAllClose(\n true_sphere_cap_surface_area,\n sphere_cap_surface_area_ests +\n np.zeros_like(true_sphere_cap_surface_area),\n rtol=rtol)\n\n def _verifyCovariance(self, vmf):\n dim = tf.compat.dimension_value(vmf.event_shape[-1])\n nsamples = 10000\n samples = vmf.sample(nsamples, seed=tfp_test_util.test_seed())\n samples = tf.debugging.check_numerics(samples, 'samples')\n cov = vmf.covariance()\n samples, cov = self.evaluate([samples, cov])\n batched_samples = np.reshape(samples, [nsamples, -1, dim])\n batch_size = batched_samples.shape[1]\n est_cov = np.zeros([batch_size, dim, dim], dtype=cov.dtype)\n for bi in range(batched_samples.shape[1]):\n est_cov[bi] = np.cov(batched_samples[:, bi], rowvar=False)\n self.assertAllClose(\n np.reshape(est_cov, cov.shape),\n cov,\n atol=0.015)\n\n def testSampleAndPdfConsistency2d(self):\n mean_dir = tf.math.l2_normalize([[1., 2], [-2, -3]], axis=-1)\n concentration = [[0], [1e-5], [0.1], [1], [10]]\n vmf = tfp.distributions.VonMisesFisher(\n mean_direction=mean_dir, concentration=concentration,\n validate_args=True, allow_nan_stats=False)\n self._verifySampleAndPdfConsistency(vmf)\n self._verifyCovariance(vmf)\n self._verifyPdfWithNumpy(vmf)\n\n def 
testSampleAndPdfConsistency3d(self):\n mean_dir = tf.math.l2_normalize([[1., 2, 3], [-2, -3, -1]], axis=-1)\n concentration = [[0], [1e-5], [0.1], [1], [10]]\n vmf = tfp.distributions.VonMisesFisher(\n mean_direction=mean_dir, concentration=concentration,\n validate_args=True, allow_nan_stats=False)\n self._verifySampleAndPdfConsistency(vmf)\n # TODO(bjp): Enable self._verifyCovariance(vmf)\n self._verifyPdfWithNumpy(vmf, atol=.002)\n\n def testSampleAndPdfConsistency4d(self):\n mean_dir = tf.math.l2_normalize([[1., 2, 3, 4], [-2, -3, -1, 0]], axis=-1)\n concentration = [[0], [1e-4], [0.1], [1], [10]]\n vmf = tfp.distributions.VonMisesFisher(\n mean_direction=mean_dir, concentration=concentration,\n validate_args=True, allow_nan_stats=False)\n self._verifySampleAndPdfConsistency(vmf)\n # TODO(bjp): Enable self._verifyCovariance(vmf)\n self._verifyPdfWithNumpy(vmf)\n\n def testSampleAndPdfConsistency5d(self):\n mean_dir = tf.math.l2_normalize([[1., 2, 3, 4, 5], [-2, -3, -1, 0, 1]],\n axis=-1)\n # TODO(bjp): Numerical instability 0 < k < 1e-2 concentrations.\n # Should resolve by eliminating the bessel_i recurrence in favor of\n # a more stable algorithm, e.g. cephes.\n concentration = [[0], [5e-2], [0.1], [1], [10]]\n vmf = tfp.distributions.VonMisesFisher(\n mean_direction=mean_dir, concentration=concentration,\n validate_args=True, allow_nan_stats=False)\n self._verifySampleAndPdfConsistency(vmf)\n # TODO(bjp): Enable self._verifyCovariance(vmf)\n self._verifyPdfWithNumpy(vmf, atol=2e-4)\n\n def testInternalShapeInference(self):\n # Regression test for the effect of b/139013403 on vMF sampling\n # The bug only triggers if TF2_BEHAVIOR=1.\n sample_shape = tf.constant([2])\n # There needs to be a 1 dimension in the batch shape to trigger the bug\n mean_dir = tf.math.l2_normalize([1., 2, 3, 4], axis=-1)\n concentration = [0]\n vmf = tfp.distributions.VonMisesFisher(\n mean_direction=mean_dir, concentration=concentration,\n validate_args=True, allow_nan_stats=False)\n self.evaluate(vmf.sample(sample_shape))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.random.randn", "tensorflow.test.main", "numpy.ones" ], [ "numpy.log", "tensorflow.compat.v2.test.main", "scipy.special.expit", "numpy.linspace", "numpy.log1p" ], [ "tensorflow.nest.flatten", "numpy.dtype", "tensorflow.as_dtype" ], [ "tensorflow.convert_to_tensor", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.map_fn", "tensorflow.cumsum", "tensorflow.gather", "tensorflow.compat.v1.name_scope", "tensorflow.unstack", "tensorflow.shape", "numpy.nonzero", "tensorflow.math.floordiv", "tensorflow.zeros_like", "numpy.array", "tensorflow.reduce_max", "tensorflow.constant", "tensorflow.range", "tensorflow.reshape", "numpy.ones", "tensorflow.math.log" ], [ "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.cos", "tensorflow.compat.v2.stack", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.compat.dimension_value", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.reshape", "tensorflow.compat.v2.sin", "tensorflow.compat.v2.math.log", "tensorflow.compat.v2.abs", "tensorflow.compat.v2.constant" ], [ "tensorflow.test.main" ], [ "tensorflow.compat.v2.exp", "scipy.special.ive", "numpy.round", "numpy.random.randn", "numpy.zeros_like", "tensorflow.compat.v2.reduce_sum", "numpy.where", "numpy.ones_like", "numpy.reshape", "tensorflow.compat.v2.math.lgamma", "tensorflow.compat.v2.math.l2_normalize", "numpy.float32", "numpy.zeros", "numpy.log", "tensorflow.compat.v2.test.main", "numpy.logspace", "tensorflow.compat.v2.debugging.check_numerics", "numpy.cov", "scipy.special.gammaln", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.math.betainc", "numpy.sum", "tensorflow.compat.v2.reduce_logsumexp", "numpy.abs", "tensorflow.compat.v2.concat", "numpy.linalg.norm", "numpy.sort", "numpy.percentile", "tensorflow.compat.v2.cast", "numpy.float64", "tensorflow.compat.v2.compat.dimension_value" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.18", "0.19" ], "tensorflow": [] } ]
mohakbhardwaj/mjmpc
[ "097e8d9bdaf0b3a15afa39030b2f53b00dfa25de", "097e8d9bdaf0b3a15afa39030b2f53b00dfa25de", "097e8d9bdaf0b3a15afa39030b2f53b00dfa25de", "097e8d9bdaf0b3a15afa39030b2f53b00dfa25de" ]
[ "mjmpc/control/olgaussian_mpc.py", "mjmpc/value_functions/quadratic_val_func.py", "mjmpc/utils/logger.py", "mjmpc/control/softqmpc/deprecated/test_sac.py" ]
[ "\"\"\"\nMPC with open-loop Gaussian policies\n\"\"\"\nfrom .controller import Controller\nfrom mjmpc.utils.control_utils import generate_noise, scale_ctrl\nimport copy\nimport numpy as np\nimport scipy.special\n\nclass OLGaussianMPC(Controller):\n def __init__(self, \n d_state,\n d_obs,\n d_action, \n action_lows,\n action_highs,\n horizon,\n init_cov,\n init_mean,\n base_action,\n num_particles,\n gamma,\n n_iters,\n step_size,\n filter_coeffs,\n set_sim_state_fn=None,\n rollout_fn=None,\n cov_type='diagonal',\n sample_mode='mean',\n batch_size=1,\n seed=0,\n use_zero_control_seq=False):\n \"\"\"\n Parameters\n __________\n base_action : str\n Action to append at the end when shifting solution to next timestep\n 'random' : appends random action\n 'null' : appends zero action\n 'repeat' : repeats second to last action\n num_particles : int\n Number of particles sampled at every iteration\n \"\"\"\n\n super(OLGaussianMPC, self).__init__(d_state,\n d_obs,\n d_action,\n action_lows, \n action_highs,\n horizon,\n gamma, \n n_iters,\n set_sim_state_fn,\n rollout_fn,\n sample_mode,\n batch_size,\n seed)\n self.init_cov = np.array([init_cov] * self.d_action)\n self.init_mean = init_mean.copy()\n self.mean_action = init_mean\n self.base_action = base_action\n self.num_particles = num_particles\n self.cov_type = cov_type\n self.cov_action = np.diag(self.init_cov)\n self.step_size = step_size\n self.filter_coeffs = filter_coeffs\n self.use_zero_control_seq = use_zero_control_seq\n\n def _get_next_action(self, state, mode='mean'):\n if mode == 'mean':\n next_action = self.mean_action[0].copy()\n elif mode == 'sample':\n delta = generate_noise(self.cov_action, self.filter_coeffs,\n shape=(1, 1), base_seed=self.seed_val + 123*self.num_steps)\n next_action = self.mean_action[0].copy() + delta.reshape(self.d_action).copy()\n else:\n raise ValueError('Unidentified sampling mode in get_next_action')\n return next_action\n \n # def sample_actions(self):\n # delta = generate_noise(self.cov_action, self.filter_coeffs,\n # shape=(self.num_particles, self.horizon), \n # base_seed = self.seed_val + self.num_steps) \n # act_seq = self.mean_action[None, :, :] + delta\n # # act_seq = scale_ctrl(act_seq, self.action_lows, self.action_highs)\n # return np.array(act_seq)\n\n def sample_noise(self):\n delta = generate_noise(self.cov_action, self.filter_coeffs,\n shape=(self.num_particles, self.horizon), \n base_seed = self.seed_val + self.num_steps) \n # act_seq = scale_ctrl(act_seq, self.action_lows, self.action_highs)\n return delta\n \n def generate_rollouts(self, state):\n \"\"\"\n Samples a batch of actions, rolls out trajectories for each particle\n and returns the resulting observations, costs, \n actions\n\n Parameters\n ----------\n state : dict or np.ndarray\n Initial state to set the simulation env to\n \"\"\"\n \n self._set_sim_state_fn(copy.deepcopy(state)) #set state of simulation\n # input('....')\n delta = self.sample_noise() #sample noise from covariance of current control distribution\n if self.use_zero_control_seq:\n delta[-1,:] = -1.0 * self.mean_action.copy()\n trajectories = self._rollout_fn(self.num_particles, self.horizon, \n self.mean_action, delta, mode=\"open_loop\") \n return trajectories\n \n def _shift(self):\n \"\"\"\n Predict good parameters for the next time step by\n shifting the mean forward one step\n \"\"\"\n self.mean_action[:-1] = self.mean_action[1:]\n if self.base_action == 'random':\n self.mean_action[-1] = np.random.normal(0, self.init_cov, self.d_action)\n elif 
self.base_action == 'null':\n self.mean_action[-1] = np.zeros((self.d_action, ))\n elif self.base_action == 'repeat':\n self.mean_action[-1] = self.mean_action[-2]\n else:\n raise NotImplementedError(\"invalid option for base action during shift\")\n\n def reset(self):\n self.num_steps = 0\n self.mean_action = np.zeros(shape=(self.horizon, self.d_action))\n self.cov_action = np.diag(self.init_cov)\n self.gamma_seq = np.cumprod([1.0] + [self.gamma] * (self.horizon - 1)).reshape(1, self.horizon)\n\n def _calc_val(self, cost_seq, act_seq):\n raise NotImplementedError(\"_calc_val not implemented\")\n\n", "import torch\nimport torch.nn as nn\n\n\nclass QuadraticVF(nn.Module):\n def __init__(self, d_obs):\n super(QuadraticVF, self).__init__()\n self.d_obs = d_obs\n self.d_input = int(d_obs + (d_obs * (d_obs+1))/2 + 1) #linear + quadratic + time\n self.linear = nn.Linear(self.d_input, 1)\n torch.nn.init.zeros_(self.linear.weight)\n torch.nn.init.zeros_(self.linear.bias)\n \n def forward(self, observation):\n num_paths = observation.shape[0]\n horizon = observation.shape[1]\n observation = torch.cat([p for p in observation])\n feat_mat = self.feature_mat(observation, horizon)\n value = self.linear(feat_mat)\n return value.view(num_paths, horizon)\n \n def feature_mat(self, obs, horizon):\n num_samples = obs.shape[0]\n feat_mat = torch.zeros(num_samples, self.d_input) #inputs\n\n #linear features\n feat_mat[:,:self.d_obs] = obs\n \n #quadratic features\n k = self.d_obs\n for i in range(self.d_obs):\n for j in range(i, self.d_obs):\n feat_mat[:,k] = obs[:,i]*obs[:,j] # element-wise product\n k += 1\n\n tsteps = torch.arange(1, horizon + 1, out=torch.FloatTensor()) / horizon\n num_paths = int(num_samples / horizon)\n tcol = tsteps.repeat(num_paths).float()\n feat_mat[:,-1] = tcol #torch.cat((feat_mat, tcol), dim=-1)\n return feat_mat\n\n def fit(self, observations, returns, delta_reg=0., return_errors=False):\n horizon = observations.shape[1]\n obs = torch.cat([p for p in observations])\n returns = torch.cat([p for p in returns])\n\n feat_mat = self.feature_mat(obs, horizon)\n #append 1 to columns for bias\n new_col = torch.ones(feat_mat.shape[0],1)\n feat_mat = torch.cat((feat_mat, new_col), axis=-1)\n \n if return_errors:\n predictions = self(observations)\n errors = returns - predictions.flatten()\n error_before = torch.sum(errors**2)/torch.sum(returns**2)\n\n for _ in range(10):\n coeffs = torch.lstsq(\n feat_mat.T.mv(returns),\n feat_mat.T.mm(feat_mat) + delta_reg * torch.eye(feat_mat.shape[1])\n )[0]\n if not torch.any(torch.isnan(coeffs)):\n break\n print('Got a nan')\n delta_reg *= 10\n self.linear.weight.data.copy_(coeffs[0:-1].T)\n self.linear.bias.data.copy_(coeffs[-1]) \n\n if return_errors:\n predictions = self(observations)\n errors = returns - predictions.flatten()\n error_after = torch.sum(errors**2)/torch.sum(returns**2)\n return error_before, error_after\n\n def print_parameters(self):\n for name, param in self.named_parameters():\n if param.requires_grad:\n print(name, param.data)\n\n\n\n \n \n", "import os\nimport sys\nfrom collections import defaultdict\nimport logging\nfrom colorlog import ColoredFormatter\n\nimport pandas\nimport numpy as np\n\nfrom tabulate import tabulate\n\n\nclass LoggerClass(object):\n GLOBAL_LOGGER_NAME = '_global_logger'\n\n _color_formatter = ColoredFormatter(\n \"%(asctime)s %(log_color)s%(name)-10s %(levelname)-8s%(reset)s %(white)s%(message)s\",\n datefmt='%m-%d %H:%M:%S',\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 
'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red,bg_white',\n },\n secondary_log_colors={},\n style='%'\n )\n\n _normal_formatter = logging.Formatter(\n '%(asctime)s %(name)-10s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M:%S',\n style='%'\n )\n\n def __init__(self):\n self._dir = None\n self._logger = None\n self._log_path = None\n self._csv_path = None\n self._tabular = defaultdict(list)\n self._curr_recorded = list()\n self._num_dump_tabular_calls = 0\n\n @property\n def dir(self):\n return self._dir\n\n #############\n ### Setup ###\n #############\n\n def setup(self, display_name, log_path, lvl):\n self._dir = os.path.dirname(log_path)\n # AL: create log dir if not exists\n if not os.path.exists(self._dir):\n os.makedirs(self._dir)\n\n self._logger = self._get_logger(LoggerClass.GLOBAL_LOGGER_NAME,\n log_path,\n lvl=lvl,\n display_name=display_name)\n self._csv_path = os.path.splitext(log_path)[0] + '.csv'\n\n ### load csv if exists\n if os.path.exists(self._csv_path):\n self._tabular = {k: list(v) for k, v in pandas.read_csv(self._csv_path).items()}\n self._num_dump_tabular_calls = len(tuple(self._tabular.values())[0])\n\n def _get_logger(self, name, log_path, lvl=logging.INFO, display_name=None):\n if isinstance(lvl, str):\n lvl = lvl.lower().strip()\n if lvl == 'debug':\n lvl = logging.DEBUG\n elif lvl == 'info':\n lvl = logging.INFO\n elif lvl == 'warn' or lvl == 'warning':\n lvl = logging.WARN\n elif lvl == 'error':\n lvl = logging.ERROR\n elif lvl == 'fatal' or lvl == 'critical':\n lvl = logging.CRITICAL\n else:\n raise ValueError('unknown logging level')\n\n file_handler = logging.FileHandler(log_path)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(LoggerClass._normal_formatter)\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(lvl)\n console_handler.setFormatter(LoggerClass._color_formatter)\n if display_name is None:\n display_name = name\n logger = logging.getLogger(display_name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(console_handler)\n logger.addHandler(file_handler)\n\n return logger\n\n ###############\n ### Logging ###\n ###############\n\n def debug(self, s):\n assert (self._logger is not None)\n self._logger.debug(s)\n\n def info(self, s):\n assert (self._logger is not None)\n self._logger.info(s)\n\n def warn(self, s):\n assert (self._logger is not None)\n self._logger.warn(s)\n\n def error(self, s):\n assert (self._logger is not None)\n self._logger.error(s)\n\n def critical(self, s):\n assert (self._logger is not None)\n self._logger.critical(s)\n\n ####################\n ### Data logging ###\n ####################\n\n def record_tabular(self, key, val):\n assert (str(key) not in self._curr_recorded)\n self._curr_recorded.append(str(key))\n\n if key in self._tabular:\n self._tabular[key].append(val)\n else:\n self._tabular[key] = [np.nan] * self._num_dump_tabular_calls + [val]\n\n def dump_tabular(self, print_func=None):\n if len(self._curr_recorded) == 0:\n return ''\n\n ### reset\n self._curr_recorded = list()\n self._num_dump_tabular_calls += 1\n\n ### make sure all same length\n for k, v in self._tabular.items():\n if len(v) == self._num_dump_tabular_calls:\n pass\n elif len(v) == self._num_dump_tabular_calls - 1:\n self._tabular[k].append(np.nan)\n else:\n raise ValueError('key {0} should not have {1} items when {2} calls have been made'.format(\n k, len(v), self._num_dump_tabular_calls))\n\n ### print\n if print_func is not None:\n log_str = tabulate(sorted([(k, v[-1]) for k, v 
in self._tabular.items()], key=lambda kv: kv[0]))\n for line in log_str.split('\\n'):\n print_func(line)\n\n ### write to file\n tabular_pandas = pandas.DataFrame({k: pandas.Series(v) for k, v in self._tabular.items()})\n tabular_pandas.to_csv(self._csv_path)\n\n\nlogger = LoggerClass()\n", "import argparse\nfrom copy import deepcopy\nfrom datetime import datetime\nimport gym\nimport numpy as np\nimport os\nimport sys\nimport tqdm\nimport yaml \n\nfrom mjmpc.control.softqmpc.algs import SAC\nfrom mjmpc.envs import GymEnvWrapper\nfrom mjmpc.utils import helpers\nimport mj_envs\nfrom stable_baselines3.sac import MlpPolicy\n\nparser = argparse.ArgumentParser(description='Run MPC algorithm on given environment')\nparser.add_argument('--config', type=str, help='yaml file with experiment parameters')\nparser.add_argument('--save_dir', type=str, default='/tmp', help='folder to save data in')\nparser.add_argument('--dump_vids', action='store_true', help='flag to dump video of episodes')\nparser.add_argument('--load_file', type=str, required=True, help='directory with weight file')\nargs = parser.parse_args()\n\n#Load experiment parameters from config file\nwith open(args.config) as file:\n exp_params = yaml.load(file, Loader=yaml.FullLoader)\n\n#Create the main environment\nenv_name = exp_params['env_name']\nenv = gym.make(env_name)\nenv = GymEnvWrapper(env)\nenv.real_env_step(True)\n\n#Create logger\ndate_time = datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\")\nlog_dir = args.save_dir + \"/\" + exp_params['env_name'] + \"/\" + date_time + \"/SAC/test/\" \nif not os.path.exists(log_dir): os.makedirs(log_dir)\nlogger = helpers.get_logger(\"sac\" + \"_\" + exp_params['env_name'], log_dir, 'debug')\nlogger.info(exp_params)\nexp_params['tensorboard_log'] = log_dir\n\nrender = exp_params['render']\nnum_test_episodes = exp_params['num_test_episodes']\ntest_seed = exp_params['test_seed']\ntotal_timesteps = exp_params['total_timesteps']\nexp_params.pop('env_name', None)\nexp_params.pop('render', None)\nexp_params.pop('num_test_episodes', None)\nexp_params.pop('test_seed', None)\nexp_params.pop('total_timesteps', None)\n\n#Define model and train\nmodel = SAC(MlpPolicy, env, **exp_params)\nmodel = SAC.load(args.load_file)\n\n\n#Commence testing\n#Main data collection loop\nep_rewards = np.array([0.] * num_test_episodes)\ntrajectories = []\nfor i in tqdm.tqdm(range(num_test_episodes)):\n #seeding to enforce consistent episodes\n episode_seed = test_seed + i*12345\n obs = env.reset(seed=episode_seed)\n \n #Collect data from interactions with environment\n observations = []; actions = []; rewards = []; dones = []\n infos = []; states = []; next_states = []\n for t in tqdm.tqdm(range(exp_params['max_ep_length'])): \n curr_state = deepcopy(env.get_env_state())\n action, value = model.predict(obs)\n\n obs, reward, done, info = env.step(action)\n \n observations.append(obs); actions.append(action)\n rewards.append(reward); dones.append(done)\n infos.append(info); states.append(curr_state)\n ep_rewards[i] += reward\n \n traj = dict(\n observations=np.array(observations),\n actions=np.array(actions),\n rewards=np.array(rewards),\n dones=np.array(dones),\n env_infos=helpers.stack_tensor_dict_list(infos),\n states=states\n )\n trajectories.append(traj)\n\nsuccess_metric = env.env.unwrapped.evaluate_success(trajectories)\naverage_reward = np.average(ep_rewards)\nreward_std = np.std(ep_rewards)\n\n#Display logs on screen and save in txt file\nlogger.info('Avg. reward = {0}, Std. 
Reward = {1}, Success Metric = {2}'.format(average_reward, reward_std, success_metric))\n\n#Can also dump data to csv once done\nlogger.record_tabular(\"AverageReward\", average_reward)\nlogger.record_tabular(\"StdReward\", reward_std)\nlogger.record_tabular(\"SuccessMetric\", success_metric)\nlogger.record_tabular(\"NumEpisodes\", num_test_episodes)\nlogger.dump_tabular()\n\nif args.dump_vids:\n print('Dumping videos')\n helpers.dump_videos(env=env, trajectories=trajectories, frame_size=(1280, 720), \n folder=log_dir, filename='vid_traj_', camera_name=None,\n device_id=1)\n\nif render:\n _ = input(\"Press enter to display optimized trajectories (will be played 3 times) : \")\n helpers.render_trajs(env, trajectories, n_times=3)\n\nenv.close()\n" ]
[ [ "numpy.diag", "numpy.random.normal", "numpy.cumprod", "numpy.array", "numpy.zeros" ], [ "torch.ones", "torch.isnan", "torch.zeros", "torch.cat", "torch.sum", "torch.eye", "torch.nn.Linear", "torch.FloatTensor", "torch.nn.init.zeros_" ], [ "pandas.read_csv", "pandas.Series" ], [ "numpy.std", "numpy.array", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kite8/quant_learning
[ "d823974cd2b5a6b8e2a20fe42d7334051fa46ea0", "d823974cd2b5a6b8e2a20fe42d7334051fa46ea0" ]
[ "STS_v2/compute_high_low_limit_v3.py", "STS_v3/daily_fixing.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 2 15:19:45 2018\n\n@author: kite\n\"\"\"\n\nimport datetime, time\nfrom pymongo import UpdateOne, ASCENDING, UpdateMany\nfrom database import DB_CONN\nfrom stock_util import get_trading_dates, get_all_codes\nimport tushare as ts\nimport numpy as np\nimport pandas as pd\nimport requests\nimport json\nimport datetime\n\n\"\"\"\n计算涨跌停价格\n\n只要获取到前一天的价格\n\n获取name和上市日期\n\n最新ipo规则\n如果是上市当天,则涨停价是上市发行价格的1.44倍\n所以需要获取到发行价格\n要不是\n\"\"\"\n\n# 获取发行价格并保存到数据库中\ndef fill_issueprice_and_timeToMarket():\n \"\"\"\n ipo_info.xlsx 是从东方choice中提取出来;\n columns:\n code -- 股票代码\n name -- 股票当前名字\n issueprice -- 发行价格\n timeToMarket -- 上市时间\n \"\"\"\n df = pd.read_excel('data/ipo_info.xlsx', header=0, dtype={'code':str})\n df = df.set_index('code')\n codes = df.index.tolist()\n \n update_requests = []\n \n for i,code in enumerate(codes):\n try:\n update_requests.append(\n UpdateOne(\n {'code':code},\n {'$set':{'issueprice':df.issueprice[code],\n 'timeToMarket':df.timeToMarket[code]}},\n upsert=True))\n except:\n print('code: %s, has problem' % code)\n \n if len(update_requests)>0:\n update_result = DB_CONN['basic'].bulk_write(update_requests, ordered=False)\n print('填充字段, 字段名: issueprice,数据集:%s,插入:%4d条,更新:%4d条' %\n ('basic', update_result.upserted_count, update_result.modified_count), flush=True)\n\ndef fixing_is_st(start, end):\n # 第一阶段\n df = pd.read_excel('data/stock_basic.xlsx', header=0, dtype={'code':str})\n df = df.set_index('code')\n codes = df[df['是否ST过'] == 1].index.tolist()\n total = len(codes)\n# all_dates = get_trading_dates(start, end)\n \n daily = DB_CONN['daily']\n \n excel_name = 'data/st_info.xlsx'\n for i in range(4):\n if i == 0:\n all_dates = get_trading_dates('2015-01-01', '2015-12-31')\n elif i == 1:\n all_dates = get_trading_dates('2016-01-01', '2016-12-31')\n if i == 2:\n all_dates = get_trading_dates('2017-01-01', '2017-12-31')\n elif i == 3:\n all_dates = get_trading_dates('2018-01-01', '2018-09-30')\n \n \n print('数据读取中')\n df = pd.read_excel(excel_name, i, header=0, dtype={'code':str})\n df = df.set_index(['code','state'])\n df.columns = df.columns.astype(np.datetime64)\n df.columns = df.columns.to_period('D')\n df.columns = df.columns.astype('str')\n print('数据读取完毕')\n \n \n for j, code in enumerate(codes):\n update_requests = []\n for date in all_dates:\n try:\n st_state = df.xs([code])[date]['是否ST']\n sst_state = df.xs([code])[date]['是否*ST']\n if (st_state == '否') and (sst_state == '否'):\n is_st_flag = False\n else:\n is_st_flag = True\n \n update_requests.append(\n UpdateOne(\n {'code':code, 'date':date, 'index':False},\n {'$set':{'is_st':is_st_flag}}\n )\n )\n except:\n print('something is wrong, code : %s, date : %s' % (code, date))\n \n if len(update_requests)>0:\n update_result = daily.bulk_write(update_requests, ordered=False)\n print('第%s年填充进度: %s/%s, 字段名: is_st,数据集:%s,插入:%4d条,更新:%4d条' %\n (i+1, j+1, total, 'daily', update_result.upserted_count, update_result.modified_count), flush=True)\n \n \n\ndef fill_high_and_low_price_between(start, end):\n \n \"\"\"\n for code in codes:\n timeToMarket = basic.find()\n \n for \n \"\"\"\n# st_mark = ['st', 'ST', '*st', '*ST']\n codes = ts.get_stock_basics().index.tolist()\n _df = pd.read_excel('data/stock_basic.xlsx', header=0, dtype={'code':str})\n _df = _df.set_index('code')\n st_codes = _df[_df['是否ST过'] == 1].index.tolist()\n total = len(codes)\n error_code = []\n\n for i,code in enumerate(codes):\n try:\n timeToMarket = DB_CONN['basic'].find_one({'code':code}, \n 
projection={'code':True, 'timeToMarket':True, '_id':False})['timeToMarket']\n except:\n error_code.append(code)\n continue\n \n daily_cursor = DB_CONN['daily'].find(\n {'code':code, 'date':{'$lte': end, '$gte': timeToMarket}, 'index':False},\n projection={'code':True, 'date':True, 'pre_close':True, '_id':False})\n \n update_requests = []\n \n for j,daily in enumerate(daily_cursor):\n date = daily['date']\n \n try:\n pre_close = daily['pre_close']\n except:\n if (j == 0) & (timeToMarket != date):\n pass\n# print('code: %s, time: %s, 数据初始日没有pre_close' % (code, date))\n elif timeToMarket == date:\n# print('code: %s, date: %s' % (code, date))\n issueprice = DB_CONN['basic'].find_one({'code':code},\n projection={'issueprice':True, '_id':False})['issueprice']\n \n high_limit = np.round(np.round(issueprice * 1.2, 2) * 1.2, 2)\n low_limit = np.round(np.round(issueprice * 0.8, 2) * 0.8, 2)\n \n update_requests.append(\n UpdateOne({'code':code, 'date':date, 'index':False},\n {'$set':{'high_limit':high_limit, 'low_limit':low_limit}},\n upsert=True))\n else:\n print('code: %s, time: %s, ipo_date: %s, 请速查原因' % (code, date, timeToMarket))\n error_code.append(code)\n continue\n \n# if date < '2016-08-09':\n# _date = '2016-08-09'\n# else:\n# _date = date\n# \n# try:\n# name = DB_CONN['basic'].find_one({'code':code, 'date':_date},\n# projection={'name':True, '_id':False})['name']\n# last_name = name\n# except:\n# if j == 0:\n# name = DB_CONN['basic'].find_one({'code':code},\n# projection={'name':True, '_id':False})['name']\n# last_name = name\n# else:\n## print('code: %s, date: %s' % (code, date))\n# name = last_name\n \n# if timeToMarket == date:\n# \n# issueprice = DB_CONN['basic'].find_one({'code':code},\n# projection={'issueprice':True, '_id':False})['issueprice']\n# \n# high_limit = np.round(np.round(issueprice * 1.2, 2) * 1.2, 2)\n# low_limit = np.round(np.round(issueprice * 0.8, 2) * 0.8, 2)\n\n# if daily['is_st'] :\n if code in st_codes:\n st_flag = DB_CONN['daily'].find_one({'code':code, 'date':date, 'index':False})['is_st']\n if st_flag:\n high_limit = np.round(pre_close * 1.05, 2)\n low_limit = np.round(pre_close * 0.95, 2)\n \n else:\n high_limit = np.round(pre_close * 1.1, 2)\n low_limit = np.round(pre_close * 0.9, 2)\n \n update_requests.append(\n UpdateOne({'code':code, 'date':date, 'index':False},\n {'$set':{'high_limit':high_limit, 'low_limit':low_limit}},\n upsert=True))\n \n if len(update_requests)>0:\n update_result = DB_CONN['daily'].bulk_write(update_requests, ordered=False)\n print('涨跌停计算, 进度: (%s/%s), code:%s, 数据集:%s, 插入:%4d条, 更新:%4d条' %\n (i+1, total, code, 'daily', update_result.upserted_count, update_result.modified_count), flush=True)\n \n# print('stock: %s high low limit complish, 进度: (%s/%s)' % (code, i+1, total), flush=True)\n\n# main funciton\nif __name__ == '__main__':\n daily_col = DB_CONN['daily']\n if 'code_1_index_1' not in daily_col.index_information().keys():\n daily_col.create_index(\n [('code', ASCENDING), ('index', ASCENDING)]\n )\n start = '2015-01-01'\n end = '2018-09-30'\n tic = time.process_time()\n fixing_is_st(start, end)\n# fill_issueprice_and_timeToMarket()\n fill_high_and_low_price_between(start, end)\n toc = time.process_time()\n delta = toc - tic\n print(delta)", "# -*- coding: utf-8 -*-# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 14 19:52:31 2018\n\n@author: kite\n\"\"\"\nimport datetime, time\nfrom datetime import datetime, timedelta\nfrom pymongo import UpdateOne, ASCENDING\nfrom database import DB_CONN\nfrom stock_util import 
get_trading_dates# , get_all_codes\nimport tushare as ts\nimport numpy as np\n\n\"\"\"\n日K线数据的修复\n\"\"\"\n\n\ndef fill_is_trading_between(begin_date=None, end_date=None):\n \"\"\"\n 填充指定时间段内的is_trading字段\n :param begin_date: 开始日期\n :param end_date: 结束日期\n \"\"\"\n all_dates = get_trading_dates(begin_date, end_date)\n total = len(all_dates)\n for i,date in enumerate(all_dates):\n _tic = time.process_time()\n fill_single_date_is_trading(date, 'daily')\n fill_single_date_is_trading(date, 'daily_hfq')\n _toc = time.process_time()\n expect_time = (_toc - _tic) * (total - i - 1 )\n print('is_trading字段填充进度: (%s/%s), 预计还需要%.2fs' % (i+1, total, expect_time))\n\n\ndef fill_is_trading(date=None):\n \"\"\"\n 为日线数据增加is_trading字段,表示是否交易的状态,True - 交易 False - 停牌\n 从Tushare来的数据不包含交易状态,也不包含停牌的日K数据,为了系统中使用的方便,我们需要填充停牌是的K数据。\n 一旦填充了停牌的数据,那么数据库中就同时包含了停牌和交易的数据,为了区分这两种数据,就需要增加这个字段。\n\n 在填充该字段时,要考虑到是否最坏的情况,也就是数据库中可能已经包含了停牌和交易的数据,但是却没有is_trading\n 字段。这个方法通过交易量是否为0,来判断是否停牌\n \"\"\"\n\n if date is None:\n all_dates = get_trading_dates()\n else:\n all_dates = [date]\n\n for date in all_dates:\n fill_single_date_is_trading(date, 'daily')\n fill_single_date_is_trading(date, 'daily_hfq')\n\n\ndef fill_single_date_is_trading(date, collection_name):\n \"\"\"\n 填充某一个日行情的数据集的is_trading\n :param date: 日期\n :param collection_name: 集合名称\n \"\"\"\n print('填充字段, 字段名: is_trading,日期:%s,数据集:%s' %\n (date, collection_name), flush=True)\n daily_cursor = DB_CONN[collection_name].find(\n {'date': date},\n projection={'code': True, 'volume': True, '_id': False},\n batch_size=1000)\n\n update_requests = []\n for daily in daily_cursor:\n # 默认是交易\n is_trading = True\n # 如果交易量为0,则认为是停牌\n if daily['volume'] == 0:\n is_trading = False\n\n update_requests.append(\n UpdateOne(\n {'code': daily['code'], 'date': date},\n {'$set': {'is_trading': is_trading}}))\n\n if len(update_requests) > 0:\n update_result = DB_CONN[collection_name].bulk_write(update_requests, ordered=False)\n print('填充字段, 字段名: is_trading,日期:%s,数据集:%s,更新:%4d条' %\n (date, collection_name, update_result.modified_count), flush=True)\n\n\ndef fill_daily_k_at_suspension_days(begin_date=None, end_date=None):\n \"\"\"\n\n :param begin_date:\n :param end_date:\n :return:\n \"\"\"\n before = datetime.now() - timedelta(days=1)\n while 1:\n last_trading_date = before.strftime('%Y-%m-%d')\n basic_cursor = DB_CONN['basic'].find(\n {'date': last_trading_date},\n projection={'code': True, 'timeToMarket': True, '_id': False},\n batch_size=5000)\n\n basics = [basic for basic in basic_cursor]\n\n if len(basics) > 0:\n break\n\n before -= timedelta(days=1)\n\n all_dates = get_trading_dates(begin_date, end_date)\n\n fill_daily_k_at_suspension_days_at_date_one_collection(\n basics, all_dates, 'daily')\n fill_daily_k_at_suspension_days_at_date_one_collection(\n basics, all_dates, 'daily_hfq')\n\n\ndef fill_daily_k_at_suspension_days_at_date_one_collection(\n basics, all_dates, collection):\n \"\"\"\n 更新单个数据集的单个日期的数据\n :param basics:\n :param all_dates:\n :param collection:\n :return:\n \"\"\"\n code_last_trading_daily_dict = dict()\n total = len(all_dates)\n for i,date in enumerate(all_dates):\n \n _tic = time.process_time()\n\n \n update_requests = []\n last_daily_code_set = set(code_last_trading_daily_dict.keys())\n for basic in basics:\n code = basic['code']\n # 如果循环日期小于\n if date < basic['timeToMarket']:\n# print('日期:%s, %s 还没上市,上市日期: %s' % (date, code, basic['timeToMarket']), flush=True)\n pass\n else:\n # 找到当日数据\n daily = DB_CONN[collection].find_one({'code': code, 'date': date, 
'index':False})\n if daily is not None:\n code_last_trading_daily_dict[code] = daily\n last_daily_code_set.add(code)\n else:\n if code in last_daily_code_set:\n last_trading_daily = code_last_trading_daily_dict[code]\n suspension_daily_doc = {\n 'code': code,\n 'date': date,\n 'close': last_trading_daily['close'],\n 'open': last_trading_daily['close'],\n 'high': last_trading_daily['close'],\n 'low': last_trading_daily['close'],\n 'volume': 0,\n 'is_trading': False\n }\n update_requests.append(\n UpdateOne(\n {'code': code, 'date': date, 'index':False},\n {'$set': suspension_daily_doc},\n upsert=True))\n \n _toc = time.process_time()\n expect_time = (_toc - _tic) * (total - i - 1 )\n if len(update_requests) > 0:\n update_result = DB_CONN[collection].bulk_write(update_requests, ordered=False)\n print('填充停牌数据进度: (%s/%s), 日期:%s,数据集:%s,插入:%4d条,更新:%4d条, 预计还需要%.2fs' %\n (i+1, total, date, collection, update_result.upserted_count, update_result.modified_count, expect_time), flush=True)\n \n\n\ndef fill_au_factor_pre_close(begin_date, end_date):\n \"\"\"\n 为daily数据集填充:\n 1. 复权因子au_factor,复权的因子计算方式:au_factor = hfq_close/close\n 2. pre_close = close(-1) * au_factor(-1)/au_factor\n :param begin_date: 开始日期\n :param end_date: 结束日期\n \"\"\"\n all_codes = ts.get_stock_basics().index.tolist() # get_all_codes()\n total = len(all_codes)\n for i,code in enumerate(all_codes):\n hfq_daily_cursor = DB_CONN['daily_hfq'].find(\n {'code': code, 'date': {'$lte': end_date, '$gte': begin_date}, 'index': False},\n sort=[('date', ASCENDING)],\n projection={'date': True, 'close': True})\n\n date_hfq_close_dict = dict([(x['date'], x['close']) for x in hfq_daily_cursor])\n\n daily_cursor = DB_CONN['daily'].find(\n {'code': code, 'date': {'$lte': end_date, '$gte': begin_date}, 'index': False},\n sort=[('date', ASCENDING)],\n projection={'date': True, 'close': True}\n )\n\n last_close = -1\n last_au_factor = -1\n \n _tic = time.process_time()\n \n update_requests = []\n for daily in daily_cursor:\n date = daily['date']\n try:\n close = daily['close']\n\n doc = dict()\n\n au_factor = np.round(date_hfq_close_dict[date] / close, 2)\n doc['au_factor'] = au_factor\n if last_close != -1 and last_au_factor != -1:\n pre_close = last_close * last_au_factor / au_factor\n doc['pre_close'] = np.round(pre_close, 2)\n\n last_au_factor = au_factor\n last_close = close\n\n update_requests.append(\n UpdateOne(\n {'code': code, 'date': date, 'index': False},\n {'$set': doc}))\n except:\n print('计算复权因子时发生错误,股票代码:%s,日期:%s' % (code, date), flush=True)\n # 恢复成初始值,防止用错\n last_close = -1\n last_au_factor = -1\n \n _toc = time.process_time()\n expect_time = (_toc - _tic) * (total - i - 1 )\n \n if len(update_requests) > 0:\n update_result = DB_CONN['daily'].bulk_write(update_requests, ordered=False)\n print('填充复权因子和前收,进度:(%s / %s), 股票:%s,更新:%4d条, 预计还需%.2fs' %\n (i+1, total, code, update_result.modified_count, expect_time), flush=True)\n\n\nif __name__ == '__main__':\n \n daily = DB_CONN['daily']\n daily_hfq = DB_CONN['daily_hfq']\n basic = DB_CONN['basic']\n if 'date_1' not in daily.index_information().keys():\n daily.create_index([('date', ASCENDING)])\n \n if 'date_1' not in daily_hfq.index_information().keys():\n daily_hfq.create_index([('date', ASCENDING)])\n \n if 'date_1' not in basic.index_information().keys():\n basic.create_index([('date', ASCENDING)])\n \n start = '2015-01-01'\n end = '2018-09-30'\n \n \n \n tic = time.process_time()\n# fill_is_trading_between(start, end)\n# fill_daily_k_at_suspension_days(start, end)\n 
fill_au_factor_pre_close(start, end)\n toc = time.process_time()\n print('cost time : %.2fs' % (toc-tic))" ]
[ [ "numpy.round", "pandas.read_excel" ], [ "numpy.round" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gshowalt/VirusPopModel
[ "8d41294fa06a44e8fa22ef390d6db14fba7818a1" ]
[ "Code/CarbonEquiv_Talmy.py" ]
[ "\n# importing all modules\nimport numpy as np\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib import cm\nimport matplotlib.tri as tri\nfrom matplotlib.colors import LogNorm\nimport matplotlib.patches as mpatches\nfrom matplotlib.ticker import LogFormatter \n\nfrom collections import Counter\n\nfrom functools import wraps\n\nimport csv\nimport sys\n\nimport itertools\nfrom itertools import islice, cycle, chain\n\nimport scipy as sp\nfrom scipy.interpolate import griddata\nfrom scipy import interpolate\nfrom scipy.integrate import odeint\nfrom scipy.stats import pareto\nfrom scipy.stats import loguniform\n\nimport seaborn as sns\nimport pandas as pd\n\nimport statistics as stats\nimport lhsmdu\n\nfrom math import nan\n\nfrom SALib.sample import saltelli, latin, ff\nfrom SALib.analyze import sobol\n\nimport random\n\n\n# define the function which includes the differential equations\n# this was adapted from the leak/lyse experiment so I just left that in and set it to a final value later\n\ndef f2(s,t, leak, lyse, temp):\n \n # first define the relative contact rate (RCR) and brine concentrating factor (BCF) by temp\n if temp < -1:\n RCR = 0.0716*temp**4 + 2.9311*temp**3 + 34.108*temp**2 + 45.826*temp + 3.5125 #Fit from Wells and Deming, 2006\n BCF = -0.0106 * temp **2 - 0.519 * temp + 0.2977\n sal = 32 * BCF\n else:\n RCR = 1\n sal = 32\n \n # these are our scaling factors for the temperature-dependent parameter distributions\n mux = 1 # for growth rate\n betx = 1 # for burst size\n phix = 1e-5 # for adsorption rate\n gamx = 1 # for lytic fraction\n \n # Temp-dependent parameter distribution for burst size\n beta = betx*(0.0064 * temp**3 - 0.3047 * temp ** 2 + 0.7701 * temp + 93.605)\n # also parameterized as a curve with a standard deviation (std) for other experiments\n # but here was simply a set curve for reproducibility\n \"\"\" beta_std = 0.0095 * temp **3 - 0.5184 * temp**2 + 2.2456 * temp + 126.59\n if beta_std < 0:\n beta_std = 0.\n beta = np.random.normal(beta_mu, beta_std)\"\"\"\n\n # Temp-dependent parameter distribution for growth rate \n # (we had two different distributions, but I went with the exponential one)\n # mu = mux*(2e-5*temp**3 + 0.0008 * temp **2 + 0.0091 * temp + 0.0386)\n # mu = 3e-6*temp**4 + 0.0001*temp**3+0.0014*temp**2 + 0.0092 * temp +0.0333\n mu = 0.0441*np.exp(0.4991*temp) \n \"\"\"mu_std = 0.1*2e-5*temp**3 + 0.0009 * temp **2 + 0.0144 * temp + 0.0818\n if mu_std<0:\n mu_std = 0.001\n mu = np.random.normal(mu_mu, mu_std)\"\"\"\n\n # Temp-dependent parameter distribution for adsorption rate \n # I also tried it as a function of salinity (immediately below), but chose temp for consistency\n #phi = phix * -1e-11*sal**2 +4e-9*sal - 9e-8\n phi = phix * (6e-13 * temp **5 - 2e-11 * temp ** 4 + 1e-10 * temp ** 3 + 3e-9 * temp ** 2 - 3e-8 * temp + 5e-8)\n \"\"\"phi_std = -2e-11*sal**2 + 4e-9*sal - 9e-8\n if phi_std < 0:\n phi_std = 0\n phi = np.random.normal(phi_mu, phi_std)\"\"\"\n \n # set conditions for when curve goes below zero\n if mu <= 0:\n mu = 0.000\n if beta < 0:\n beta = 1\n if phi < 0:\n phi = 1e-15\n \n # now we want to scale adsorption rate by RCR to incorporate the sea ice \n phi = phi * RCR \n\n \n # SET PARAMETERS\n alpha = 1.2e-7*3**((temp-23)/10)#4.2e-7 at +8, or 1.2e-7 at lower temps, at -5 --> mu = 0.25/day = 0.01/hr = 1e-8\n # alpha is a coefficient that we'd like to change with temperature? 
Or change eta?\n #nutrient transfer coefficient to bacteria (ug/cell * hr)\n Q = 0.022\n #half saturation constant (ug/mL)\n d = 1e-8\n #constant of bacterial death (1/hr)\n m = 1e-6\n #constant of viral decay (1/hr)\n g = leak\n #POM transfer coefficient from bacteria (ug/cell*hr)\n n = lyse\n #POM transfer coefficient from viral lysis ug/[burst]cell\n #gamma is a lysogeny value\n gamma = 1 #-1/temp #*mu\n \n # set up solution matrix\n N = s[0]\n B = s[1]\n V = s[2]\n P = s[3]\n \n #systems of equations below\n \n dNdt = - alpha * (N / (N + Q)) * B + g * (alpha * (N/(N+Q))*B) + (n * 1e-7 * (gamma) * phi * V * B)\n if N < 0:\n N = 0\n dBdt = (mu) * (N/(Q + N)) * B - gamma * phi * V * B - d*B\n if B < 1:\n B = 1\n dVdt = gamma*beta * B * phi*V - phi * V * B - m*V\n if V < 1:\n V = 1\n #dPdt = (g * (0.0083*1e-7))*B + (n * 1e-7 * phi * V * B*RCR) + 1e-10*m*V + 1.0e-7*d*B - (P/(P+Q))*alpha * B\n dPdt = g * alpha * (N/ (N+Q))*B + n * 1e-7 * (gamma)*phi*B*V\n \n # according to Jover, 2014 - virus has 0.02 to 0.05 fg carbon/virion => translate into ug Carbon = 5e-11\n VCarbonEQ = 5e-11\n BCarbonEQ = 1e-7 #from Bionumbers\n \n # building the carbon equivalent for viruses, lysate as per Talmy et al 2019\n rv = 90 #virus radius (nm)\n Qv = (41 * (rv - 2.5)**3 + 130*(7.5*(rv)**2 - 18.74 * rv + 15.63)) * (10e6/(6.022 * 10**23)) # virus carbon eq\n phiEQ = (phi)/(Qv) \n Qh = 1e-7\n etav = beta * (Qv/Qh)\n \n TotalVCarbon = (phiEQ * (gamma) * (V*VCarbonEQ) * (B*BCarbonEQ))\n VirusCarbon = etav * (phiEQ * (gamma) * (V*VCarbonEQ) * (B*BCarbonEQ))\n LysateCarbon = (1-etav)*(phiEQ * (gamma) * (V*VCarbonEQ) * (B*BCarbonEQ))\n LeakCarbon = g * (alpha * (N/(N+Q))*B)\n\n \n #print (mu, beta, phi, gamma)\n return [dNdt, dBdt, dVdt, dPdt, TotalVCarbon, VirusCarbon, LysateCarbon, LeakCarbon]\n\n\n# define time, temperature scale\ntime = 5000\ntemp_list = [-12.5,-10, -8, -6, -4, -2]\nt = np.linspace(1,time,1000)\n\n# set up empty matricies\nDOMX = []\nDOMA = []\nDOMB = []\nDOMC = []\nDOM1 = []\nDOM10 = []\nDOM100 = []\n\nRCRlist = []\nMulist = []\nendvals1 = []\nendvals2 = []\nendvals3 = []\nendvals4 = []\nBurstlist = []\nAdsorplist = []\n\ncount = 0\nplt.rcParams[\"font.family\"] = \"sans-serif\"\nfig1 = plt.figure(figsize=(20,15))\nfig1.tight_layout()\nplt.rcParams.update({'font.size': 15})\n\nfor xx in temp_list:\n temp = xx\n count +=1\n mu = 0.0441*np.exp(0.4991*temp)\n gamma = 1\n #print (\"gamma is:\", gamma, \"and mu is:\", mu)\n if temp < -1:\n RCR = 0.0716*temp**4 + 2.9311*temp**3 + 34.108*temp**2 + 45.826*temp + 3.5125 #Fit from Wells and Deming, 2006\n BCF = -0.0106 * temp **2 - 0.519 * temp + 0.2977\n sal = 32 * BCF\n else:\n BCF = 1\n sal = 32\n \n s0=[0.12*BCF,1e4*BCF, 1e5*BCF,0,0,0,0,0]\n s = odeint(f2,s0,t, args = (0.4,0.99, temp))\n xend.append(sum(s[:,3]))\n \n \n y1 = s[:,4]/(0.12)\n y2 = s[:,5]/(0.12)\n y3 = s[:,6]/(0.12)\n y4 = s[:,7]/(0.12)\n \n \n plt.subplot(3, 3, count)\n\n \n colors1 = ['cadetblue', '#FF6F61'] #, 'darkblue']\n plt.stackplot(t,y2,y3, colors = colors1,labels=['To Virus','To Lysate'])\n plt.legend(loc='lower right')\n\n plt.xlabel('Temperature: {} (˚C)'.format(temp))\n plt.yscale('log')\n plt.ylabel('% Initial Nutrient')\n\n\n \n # take last value of each returned number for the temp-dependent plot \n endvals1.append(y1[-1])\n endvals2.append(y2[-1])\n endvals3.append(y3[-1])\n endvals4.append(y4[-1])\n \n # make lists of calculated temp-dependent parameters if we want to plot against them alter\n RCRlist.append(RCR)\n Mulist.append(mu)\n beta = 1*(0.0064 * temp**3 - 0.3047 
* temp ** 2 + 0.7701 * temp + 93.605)\n Burstlist.append(beta)\n phi = RCR* 1 * (6e-13 * temp **5 - 2e-11 * temp ** 4 + 1e-10 * temp ** 3 + 3e-9 * temp ** 2 - 3e-8 * temp + 5e-8)\n Adsorplist.append(phi)\n\n\n\nplt.subplots_adjust(hspace = 1)\nfig1.suptitle(\"Cumulative organic carbon recycled into Virions or Lysate \",fontsize=15)\n\n# Plot as a funciton of temperature\nplt.rcParams[\"font.family\"] = \"sans-serif\"\nplt.rcParams.update({'font.size': 20})\nfig2 = plt.figure(figsize=(10,5))\nfig2.tight_layout()\n\n\nendvals1_b = [i/max(endvals1) for i in endvals1]\nendvals2_b = [i/max(endvals2) for i in endvals2]\nendvals3_b = [i/max(endvals3) for i in endvals3]\nendvals4_b = [i/max(endvals4) for i in endvals4]\n\n#ax1 = plt.stackplot(temp_list, endvals2_b, endvals3, colors = colors1) #, labels=['To Virus','To Lysate', 'Cell exudate'])\n#ax1 = plt.plot(temp_list, Burstlist)\nplt.plot(temp_list,endvals2_b, c = 'cadetblue', marker = 'o', markeredgecolor='white', markersize=15, label='to Virions')\nplt.plot(temp_list, endvals3_b, c = '#FA7268', marker = 'o', markeredgecolor='white', markersize=15, label='to Lysate') \n\nplt.xlabel('Temperature (˚C)')\nplt.ylabel('Carbon Flow (Relative to Maximum)')\nplt.legend(loc='lower right')\nfig2.suptitle(\"Cumulative organic carbon recycled into \\nVirions or Lysate as a function of temperature\\n\",fontsize=15)\n\n\n\n\n# In[88]:\n#fig1.savefig('CE_Grid_withRCR_runaway.jpeg', bbox_inches=\"tight\", dpi=300,transparent=True)\n#fig2.savefig('CE_Temp_noRCR_line.jpeg', bbox_inches=\"tight\", dpi=300,transparent=True)\n\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "matplotlib.pyplot.yscale", "scipy.integrate.odeint", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots_adjust", "numpy.exp", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.stackplot", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
norheim/pextant
[ "f4235719279c0e6f178ae1e0f8b1ea3346533915" ]
[ "pextant/solvers/SEXTANTsolver.py" ]
[ "from pextant.lib.geoshapely import GeoPolygon, LONG_LAT\nimport numpy as np\nimport csv\n\nclass SEXTANTSolver(object):\n def __init__(self, environmental_model, cost_function, viz):\n self.env_model = environmental_model\n self.cost_function = cost_function\n self.viz = viz\n self.searches = []\n\n def solve(self, start_point, end_point):\n pass\n\n def solvemultipoint(self, waypoints):\n search_list = sextantSearchList(waypoints)\n for i in range(len(waypoints) - 1):\n search_result = self.solve(waypoints[i], waypoints[i + 1])\n search_list.append(search_result)\n return search_list, search_list.raw(), search_list.itemssrchd()\n\nclass sextantSearchList(object):\n def __init__(self, points):\n self.startpoint = points[0]\n self.endpoint = points[-1]\n self.waypoints = points\n self.list = []\n self.rawpoints = []\n\n def addresult(self, raw, nodes, coordinates, expanded_items):\n self.list.append(sextantSearch(raw, nodes, coordinates, expanded_items))\n\n def append(self, sextantsearch):\n self.list.append(sextantsearch)\n\n def raw(self):\n result = []\n for search in self.list:\n if search == False:\n return None\n result += search.raw\n return np.array(result)\n\n def coordinates(self):\n result = []\n for search in self.list:\n if type(search) == bool:\n return None\n result += search.coordinates.to(LONG_LAT).transpose().tolist()\n return GeoPolygon(LONG_LAT, *np.array(result).transpose())\n\n def itemssrchd(self):\n result = []\n for search in self.list:\n if type(search) == bool:\n return None\n result += search.expanded_items\n return np.array(result)\n\n def tojson(self, save=False):\n return [elt.tojson() for elt in self.list]\n\n def tocsv(self, filepath=None):\n csvlist = [elt.tocsv() for elt in self.list]\n rows = [['isStation', 'x', 'y', 'z', 'distanceMeters', 'energyJoules', 'timeSeconds']]\n for row in csvlist:\n rows += row\n if filepath:\n with open(filepath, 'wb') as csvfile:\n writer = csv.writer(csvfile)\n for row in rows:\n writer.writerow(row)\n return csvlist\n\n\nclass sextantSearch(object):\n def __init__(self, raw, nodes, coordinates, expanded_items):\n self.namemap = {\n 'time': ['timeList','totalTime'],\n 'pathlength': ['distanceList','totalDistance'],\n 'energy': ['energyList','totalEnergy']\n }\n #self.searches = []\n self.nodes = nodes\n self.raw = raw\n self.npraw = np.array(raw).transpose()\n self.coordinates = coordinates\n self.expanded_items = expanded_items\n\n def tojson(self):\n out = {}\n coordinates = self.coordinates.to(LONG_LAT).transpose().tolist()\n out[\"geometry\"] = {\n 'type': 'LineString',\n 'coordinates': coordinates\n }\n results = {}\n for k, v in self.namemap.items():\n results.update({v[0]:[],v[1]:0})\n for i, mesh_srch_elt in enumerate(self.nodes):\n derived = mesh_srch_elt.derived\n for k, v in derived.items():\n results[self.namemap[k][0]].append(v)\n for k, v in self.namemap.items():\n results[v[1]] = sum(results[v[0]])\n out[\"derivedInfo\"] = results\n return out\n\n def tocsv(self, coordstype=LONG_LAT):\n sequence = []\n coords = self.coordinates.to(coordstype).transpose().tolist()\n for i, mesh_srch_elt in enumerate(self.nodes):\n if i != 0:\n row_entry = [i==1 or i==len(coords)-1] #True if it's the first or last entry\n row_entry += coords[i] + [mesh_srch_elt.mesh_element.z]\n derived = mesh_srch_elt.derived\n row_entry += [derived['pathlength'], derived['time'], derived['energy']]\n sequence += [row_entry]\n return sequence\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Willyoung2017/doc-qa
[ "7ee02218952b0b9db63bc82b3895f743cdbd8f22" ]
[ "docqa/elmo/ablate_elmo_sub_filter.py" ]
[ "import argparse\nfrom datetime import datetime\n\nfrom tensorflow.contrib.keras.python.keras.initializers import TruncatedNormal\n\nfrom docqa import trainer\nfrom docqa.data_processing.qa_training_data import ContextLenKey\nfrom docqa.dataset import ClusteredBatcher\nfrom docqa.encoder import DocumentAndQuestionEncoder, SingleSpanAnswerEncoder, DocumentAndQuestionEncoderWithSubstring\nfrom docqa.evaluator import LossEvaluator, SpanEvaluator\nfrom docqa.elmo.elmo import ElmoLayer\nfrom docqa.elmo.lm_qa_models import AttentionWithElmo, SquadContextConcatSkip\nfrom docqa.model_dir import ModelDir\nfrom docqa.nn.attention import BiAttention, StaticAttentionSelf\nfrom docqa.nn.embedder import FixedWordEmbedder, CharWordEmbedder, LearnedCharEmbedder, LearnedSubstringEmbedder, \\\n FilteredFixedWordEmbedder\nfrom docqa.nn.layers import FullyConnected, ChainBiMapper, NullBiMapper, MaxPool, Conv1d, SequenceMapperSeq, \\\n VariationalDropoutLayer, ResidualLayer, ConcatWithProduct, MapperSeq, DropoutLayer\nfrom docqa.nn.recurrent_layers import CudnnGru\nfrom docqa.nn.similarity_layers import TriLinear\nfrom docqa.nn.span_prediction import BoundsPredictor\nfrom docqa.squad.squad_data import SquadCorpus, DocumentQaTrainingData\n\n\ndef main():\n parser = argparse.ArgumentParser(\"Train our ELMo model on SQuAD\")\n parser.add_argument(\"output_dir\")\n parser.add_argument(\"--dim\", type=int, default=90)\n parser.add_argument(\"--l2\", type=float, default=0)\n parser.add_argument(\"--mode\", choices=[\"input\", \"output\", \"both\", \"none\"], default=\"both\")\n parser.add_argument(\"--top_layer_only\", action=\"store_true\")\n #parser.add_argument(\"--combination\", choices=[\"x, y\", \"x * y\", \"x, y, x * y\"], default=\"x, y\")\n parser.add_argument(\"--use_substring\", type=str, default=\"None\")\n parser.add_argument(\"--sub_dim\", type=int, default=50)\n args = parser.parse_args()\n print(args)\n out = args.output_dir + \"-\" + datetime.now().strftime(\"%m%d-%H%M%S\")\n\n dim = args.dim\n recurrent_layer = CudnnGru(dim, w_init=TruncatedNormal(stddev=0.05))\n\n params = trainer.TrainParams(trainer.SerializableOptimizer(\"Adadelta\", dict(learning_rate=1.0)),\n ema=0.999, max_checkpoints_to_keep=2, async_encoding=10,\n num_epochs=24, log_period=30, eval_period=1200, save_period=1200,\n best_weights=(\"dev\", \"b17/text-f1\"),\n eval_samples=dict(dev=None, train=8000))\n\n lm_reduce = MapperSeq(\n ElmoLayer(args.l2, layer_norm=False, top_layer_only=args.top_layer_only),\n DropoutLayer(0.5),\n )\n CharEmbedderCls, EncoderCls = (LearnedCharEmbedder, DocumentAndQuestionEncoder) if args.use_substring == \"None\" \\\n else (LearnedSubstringEmbedder, DocumentAndQuestionEncoderWithSubstring)\n charEmbedder = CharEmbedderCls(word_size_th=14, char_th=20, char_dim=args.sub_dim, init_scale=0.05, force_cpu=True)\n if args.use_substring != None:\n charEmbedder._load_substring_vocab(args.use_substring)\n\n final_sub_dim = 100 #if args.combination == \"x, y\" else 300\n\n model = AttentionWithElmo(\n #combination=args.combination,\n encoder=EncoderCls(SingleSpanAnswerEncoder()),\n lm_model=SquadContextConcatSkip(),\n append_before_atten=(args.mode == \"both\" or args.mode == \"output\"),\n append_embed=(args.mode == \"both\" or args.mode == \"input\"),\n max_batch_size=128,\n word_embed=FilteredFixedWordEmbedder(vec_name=\"glove.840B.300d\", word_vec_init_scale=0, learn_unk=True, cpu=True),\n char_embed=CharWordEmbedder(\n charEmbedder,\n MaxPool(Conv1d(final_sub_dim, 5, 0.8)),\n shared_parameters=True\n ),\n 
embed_mapper=SequenceMapperSeq(\n VariationalDropoutLayer(0.8),\n recurrent_layer,\n VariationalDropoutLayer(0.8),\n ),\n lm_reduce=None,\n lm_reduce_shared=lm_reduce,\n per_sentence=False,\n memory_builder=NullBiMapper(),\n attention=BiAttention(TriLinear(bias=True), True),\n match_encoder=SequenceMapperSeq(FullyConnected(dim * 2, activation=\"relu\"),\n ResidualLayer(SequenceMapperSeq(\n VariationalDropoutLayer(0.8),\n recurrent_layer,\n VariationalDropoutLayer(0.8),\n StaticAttentionSelf(TriLinear(bias=True), ConcatWithProduct()),\n FullyConnected(dim * 2, activation=\"relu\"),\n )),\n VariationalDropoutLayer(0.8)),\n predictor = BoundsPredictor(ChainBiMapper(\n first_layer=recurrent_layer,\n second_layer=recurrent_layer\n ))\n )\n\n batcher = ClusteredBatcher(45, ContextLenKey(), False, False)\n data = DocumentQaTrainingData(SquadCorpus(), None, batcher, batcher)\n\n with open(__file__, \"r\") as f:\n notes = f.read()\n notes = str(sorted(args.__dict__.items(), key=lambda x:x[0])) + \"\\n\" + notes\n\n trainer.start_training(data, model, params,\n [LossEvaluator(), SpanEvaluator(bound=[17], text_eval=\"squad\")],\n ModelDir(out), notes)\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "tensorflow.contrib.keras.python.keras.initializers.TruncatedNormal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.2" ] } ]
Duy-Vu/stock-network
[ "3e84cfc581cd07001e86c20101c91c2f8910deb2" ]
[ "utils.py" ]
[ "import numpy as np\r\n\r\n\r\ndef clean_data(df, out_df_dir=\"\"):\r\n df.dropna(axis=1, inplace=True)\r\n\r\n if out_df_dir:\r\n df.to_csv(out_df_dir)\r\n\r\n return df\r\n\r\n\r\n# Calculate log change of daily price\r\ndef log_change(series):\r\n return np.log(series[1] / series[0])\r\n\r\n\r\n# Calculate correaltion\r\ndef calculate_cor(df, start, end):\r\n return df[start:end].rolling(\r\n window=2,\r\n min_periods=2\r\n ).apply(\r\n log_change,\r\n raw=True\r\n ).corr(method=\"pearson\")\r\n\r\n# Calculate profit\r\ndef take_profit(price, start, end):\r\n return price.iloc[end]/price.iloc[start] - 1" ]
[ [ "numpy.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jet-universe/particle_transformer
[ "68a7fbcd7d39a64b753251064f120462400895a1" ]
[ "networks/example_ParticleTransformer.py" ]
[ "import os\nimport torch\nfrom weaver.utils.logger import _logger\nfrom weaver.utils.import_tools import import_module\n\nParticleTransformer = import_module(\n os.path.join(os.path.dirname(__file__), 'ParticleTransformer.py'), 'ParT').ParticleTransformer\n\n\nclass ParticleTransformerWrapper(torch.nn.Module):\n def __init__(self, **kwargs) -> None:\n super().__init__()\n self.mod = ParticleTransformer(**kwargs)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'mod.cls_token', }\n\n def forward(self, points, features, lorentz_vectors, mask):\n return self.mod(features, v=lorentz_vectors, mask=mask)\n\n\ndef get_model(data_config, **kwargs):\n\n cfg = dict(\n input_dim=len(data_config.input_dicts['pf_features']),\n num_classes=len(data_config.label_value),\n # network configurations\n pair_input_dim=4,\n embed_dims=[128, 512, 128],\n pair_embed_dims=[64, 64, 64],\n num_heads=8,\n num_layers=8,\n num_cls_layers=2,\n block_params=None,\n cls_block_params={'dropout': 0, 'attn_dropout': 0, 'activation_dropout': 0},\n fc_params=[],\n activation='gelu',\n # misc\n trim=True,\n for_inference=False,\n )\n cfg.update(**kwargs)\n _logger.info('Model config: %s' % str(cfg))\n\n model = ParticleTransformerWrapper(**cfg)\n\n model_info = {\n 'input_names': list(data_config.input_names),\n 'input_shapes': {k: ((1,) + s[1:]) for k, s in data_config.input_shapes.items()},\n 'output_names': ['softmax'],\n 'dynamic_axes': {**{k: {0: 'N', 2: 'n_' + k.split('_')[0]} for k in data_config.input_names}, **{'softmax': {0: 'N'}}},\n }\n\n return model, model_info\n\n\ndef get_loss(data_config, **kwargs):\n return torch.nn.CrossEntropyLoss()\n" ]
[ [ "torch.nn.CrossEntropyLoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jessijzhao/fairscale
[ "d6a8fc6dadc5d5ab4e3ee3f42f8cd570d70d30ec", "d6a8fc6dadc5d5ab4e3ee3f42f8cd570d70d30ec" ]
[ "benchmarks/pipe.py", "fairscale/optim/grad_scaler.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport argparse\nimport logging\nimport math\nimport os\nimport time\nimport warnings\n\nfrom benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm\nimport torch\nfrom torch.distributed import rpc\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data import DataLoader\nimport torchtext\nfrom torchtext.data.utils import get_tokenizer\n\nfrom fairscale.nn import Pipe\nfrom fairscale.nn.model_parallel import initialize_model_parallel\nfrom fairscale.nn.model_parallel.initialize import get_data_parallel_group, get_pipeline_parallel_group\nfrom fairscale.nn.pipe import LazyModule, pipe\nfrom fairscale.optim import GradScaler\nfrom fairscale.optim.oss import OSS\nfrom fairscale.utils.testing import dist_init, get_worker_map\n\ntry:\n from fairscale.optim import Adam # type: ignore\n\n can_benchmark = True\nexcept ImportError:\n from torch.optim import Adam # type: ignore\n\n can_benchmark = False\n\n\ndef init_random_seed(seed: int):\n import numpy\n\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n numpy.random.seed(seed)\n\n\nPIPE_CHUNKS = 2\niteration_count = 0\n\n\nclass EmbeddingLayer(nn.Embedding):\n def __init__(self, ntoken, ninp, initrange):\n super().__init__(ntoken, ninp)\n self.ninp = ninp\n self.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, src):\n return super().forward(src) * math.sqrt(self.ninp)\n\n\nclass PositionalEncodingLayer(nn.Module):\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncodingLayer, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x):\n x = x + self.pe[: x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerDecoderLayer(nn.TransformerEncoderLayer):\n \"\"\"Though this class inherits from torch.nn.TransformerEncoderLayer,\n it functions as a decoder in this model\"\"\"\n\n def __init__(self, ninp, nhead, nhid, droupout):\n super().__init__(ninp, nhead, nhid, droupout)\n self.src_mask = None\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float(\"-inf\")).masked_fill(mask == 1, float(0.0))\n return mask\n\n def forward(self, src):\n global iteration_count\n iteration_count += 1\n # if iteration_count == 196:\n # dump_cuda_tensors()\n\n if self.src_mask is None or self.src_mask.size(0) != len(src):\n device = src.device\n mask = self._generate_square_subsequent_mask(len(src)).to(device)\n self.src_mask = mask\n\n return super().forward(src, self.src_mask)\n\n\nclass LinearLayer(nn.Linear):\n def __init__(self, ninp, ntoken, initrange):\n super().__init__(ninp, ntoken)\n self.bias.data.zero_()\n self.weight.data.uniform_(-initrange, initrange)\n\n\nclass TransformerLMSequential(nn.Sequential):\n \"\"\"A small language model based on the design of GPT-2 using nn.Sequential\n for compatability with Pipe\"\"\"\n\n def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):\n layers = [\n 
EmbeddingLayer(ntokens, ninp, initrange),\n PositionalEncodingLayer(ninp, dropout),\n ]\n for _ in range(ndecoder):\n layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))\n\n layers.append(LinearLayer(ninp, ntokens, initrange))\n super(TransformerLMSequential, self).__init__(*layers)\n\n\ndef get_data(device):\n with warnings.catch_warnings(record=True) as fjldska:\n TEXT = torchtext.data.Field(\n tokenize=get_tokenizer(\"basic_english\"), init_token=\"<sos>\", eos_token=\"<eos>\", lower=True\n )\n train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(TEXT)\n TEXT.build_vocab(train_txt)\n ntokens = len(TEXT.vocab.stoi)\n\n batch_size = 20\n eval_batch_size = 10\n train_data = batchify(train_txt, batch_size, TEXT, device)\n val_data = batchify(val_txt, eval_batch_size, TEXT, device)\n test_data = batchify(test_txt, eval_batch_size, TEXT, device)\n\n return ntokens, train_data, val_data, test_data\n\n\ndef batchify(data, bsz, TEXT, device):\n data = TEXT.numericalize([data.examples[0].text])\n nbatch = data.size(0) // bsz\n data = data.narrow(0, 0, nbatch * bsz)\n data = data.view(bsz, -1).t().contiguous()\n return data.to(device)\n\n\ndef get_batch(source, i, bptt):\n seq_len = min(bptt, len(source) - 1 - i)\n data = source[i : i + seq_len]\n target = source[i + 1 : i + 1 + seq_len].view(-1)\n return data, target\n\n\ndef make_model(args, device, ntokens):\n ninp = 2048 # embedding dimension\n nhid = 2048 # the dimension of the feedforward network model in nn.TransformerEncoder\n nhead = 32 # the number of heads in the multiheadattention models\n dropout = 0\n initrange = 0.1\n ndecoder = args.num_decoder_layers\n\n if args.lazy_construction:\n layers = [\n LazyModule(lambda: EmbeddingLayer(ntokens, ninp, initrange)),\n LazyModule(lambda: PositionalEncodingLayer(ninp, dropout)),\n ]\n for _ in range(ndecoder):\n layers.append(LazyModule(lambda: TransformerDecoderLayer(ninp, nhead, nhid, dropout)))\n\n layers.append(LazyModule(lambda: LinearLayer(ninp, ntokens, initrange)))\n model = layers\n else:\n model = TransformerLMSequential(ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)\n\n criterion = nn.CrossEntropyLoss()\n lr = 0.01 # learning rate\n\n def make_adam(model):\n if args.ddp_zero:\n return OSS(params=model.parameters(), optim=Adam, group=get_data_parallel_group(), lr=lr)\n else:\n return Adam(model.parameters(), lr=lr)\n\n optimizer = make_adam\n scaler = GradScaler()\n\n return model, criterion, optimizer, scaler\n\n\ndef get_tensors_by_size_bucket():\n from collections import defaultdict\n import gc\n\n size_buckets = defaultdict(int)\n for obj in gc.get_objects():\n if not isinstance(obj, torch.Tensor):\n continue\n if obj.device.type == \"cuda\":\n size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1\n\n return size_buckets\n\n\ndef dump_size_buckets(size_buckets, prefix=\"\"):\n from functools import reduce\n import operator\n\n total = 0\n for key, value in size_buckets.items():\n this = reduce(operator.mul, key) * value\n total += this\n print(prefix + f\"{key} : {value}, {this}\")\n\n print(prefix + f\"total = {total}\")\n\n\nlast_size_buckets = None\nonce = True\n\n\ndef safe_rank():\n try:\n return torch.distributed.get_rank()\n except AssertionError:\n return 0\n\n\ndef check_size_buckets():\n global last_size_buckets\n global once\n size_buckets = get_tensors_by_size_bucket()\n if last_size_buckets is not None:\n if size_buckets != last_size_buckets:\n print(f\"difference is oustanding tensors: {safe-rank()}\")\n 
dump_size_buckets(last_size_buckets, \"old: \")\n dump_size_buckets(size_buckets, \"new: \")\n if once:\n print(f\"dumping buckets for: {safe_rank()}\")\n dump_size_buckets(last_size_buckets, \"old: \")\n dump_size_buckets(size_buckets, \"new: \")\n once = False\n else:\n print(f\"size buckets none on {safe_rank()}\")\n last_size_buckets = size_buckets\n\n\ndef dump_cuda_tensors():\n print(f\"dumping cuda tensors...\")\n from functools import reduce\n import gc\n import operator\n\n for obj in gc.get_objects():\n if not isinstance(obj, torch.Tensor):\n continue\n if obj.device.type == \"cuda\":\n size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1\n\n print(f\"outstanding cuda tensors:\")\n total = 0\n for key, value in size_buckets.items():\n this = reduce(operator.mul, key) * value\n total += this\n print(f\"{key} : {value}, {this}\")\n print(f\"total size = {total}\")\n\n import pprint\n\n pprint.pprint(torch.cuda.memory_stats())\n\n\ndef train(lm_dataloader, model, criterion, optimizer, vocab_size, args):\n model.train()\n from functools import reduce\n import operator\n\n num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))\n if model.group:\n total = torch.Tensor([num_params])\n if torch.cuda.is_available():\n total = total.cuda()\n torch.distributed.all_reduce(total, group=model.group)\n logging.info(\n f\"training model, #prams = {num_params}, group: {model.group.rank()}, grank:\"\n f\" {torch.distributed.get_rank()}, sizes {model.group.size()}\"\n )\n torch.distributed.barrier()\n if model.group.rank() == 0:\n logging.info(f\"total #prams = {total.item()}\")\n else:\n logging.info(f\"training model, #prams = {num_params}\")\n vocab_size = 10000 # FIXME\n total_loss = 0.0\n start_time = time.time()\n word_counter = 0\n\n optimizer = optimizer(model)\n\n def get_first_device(model):\n if isinstance(model, DDP):\n model = model.module\n\n if not torch.cuda.is_available():\n return torch.device(\"cpu\")\n if model.devices:\n return model.devices[0]\n else:\n return torch.cuda.current_device()\n\n def get_last_device(model):\n if isinstance(model, DDP):\n model = model.module\n\n if not torch.cuda.is_available():\n return torch.device(\"cpu\")\n if model.devices:\n return model.devices[-1]\n else:\n return torch.cuda.current_device()\n\n pipe_group = model.group\n\n if args.ddp_zero:\n model = DDP(\n model,\n device_ids=[torch.cuda.current_device()],\n process_group=get_data_parallel_group(),\n find_unused_parameters=False,\n )\n\n if pipe_group and pipe_group.rank() != 0 and pipe_group.rank() != (pipe_group.size() - 1):\n thing = {\"input\": torch.zeros(args.batch_size)}\n\n class FakeDataset:\n def __getitem__(self, index):\n return thing\n\n def __len__(self):\n return len(lm_dataloader)\n\n lm_dataloader = FakeDataset()\n\n for i, batch in enumerate(lm_dataloader):\n bi = batch[\"input\"]\n if args.max_batch and i > args.max_batch:\n break\n optimizer.zero_grad()\n try:\n if (pipe_group is None or pipe_group.rank() == 0) and not args.ddp_zero:\n tmp = batch[\"input\"].to(get_first_device(model))\n output = model(tmp)\n else:\n output = model(batch[\"input\"])\n except Exception as e:\n raise RuntimeError(f\"training failed on {torch.distributed.get_rank()}\") from e\n\n if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:\n target = batch[\"target\"].to(get_last_device(model))\n output = output.to(target.device)\n\n loss = criterion(output.view(-1, vocab_size), target.view(-1))\n if args.ddp_zero:\n ddp_group = 
get_data_parallel_group()\n torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM, group=ddp_group)\n loss /= ddp_group.size()\n loss.backward()\n del target\n else:\n if args.ddp_zero:\n model.module.back_helper(output)\n else:\n model.back_helper(output)\n\n del output\n\n torch.nn.utils.clip_grad_value_(model.parameters(), 0.05)\n optimizer.step()\n\n if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:\n total_loss += loss.item()\n log_interval = 1\n word_counter += batch[\"ntokens\"]\n if i % log_interval == 0 and i > 0:\n cur_loss = total_loss / log_interval\n elapsed = time.time() - start_time\n print(\n \"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}\".format(\n i, word_counter / elapsed, cur_loss, math.exp(cur_loss)\n )\n )\n word_counter = 0\n total_loss = 0\n start_time = time.time()\n # if i >= 10:\n # break\n # torch.cuda.empty_cache()\n # check_size_buckets()\n\n\ndef evaluate(eval_model, data_source, criterion, bptt, ntokens):\n eval_model.eval()\n total_loss = 0.0\n with torch.no_grad():\n for i in range(0, data_source.size(0) - 1, bptt):\n data, targets = get_batch(data_source, i, bptt)\n output = eval_model(data)\n output = output.to(targets.device)\n output_flat = output.view(-1, ntokens)\n total_loss += len(data) * criterion(output_flat, targets).item()\n return total_loss / (len(data_source) - 1)\n\n\ndef get_number_of_words(data):\n return data.size()[0] * data.size()[1]\n\n\ndef benchmark_language_model(train_data, val_data, test_data, model, criterion, optimizer, ntokens, args):\n epoch = 1\n bptt = 35\n start_time = time.time()\n\n print(\"-\" * 110)\n print(\"| start of epoch {:1d}\".format(epoch))\n print(\"-\" * 110)\n epoch_start_time = time.time()\n train(train_data, model, criterion, optimizer, bptt, ntokens, args)\n val_loss = 1 # evaluate(model, val_data, criterion, bptt, ntokens)\n print(\"-\" * 89)\n print(\n \"| end of epoch {:1d} | time: {:5.2f}s | valid loss {:5.2f} \".format(\n epoch, (time.time() - epoch_start_time), val_loss\n )\n )\n print(\"-\" * 110)\n\n elapsed_time = time.time() - start_time\n nwords = get_number_of_words(train_data) + get_number_of_words(val_data)\n wps = nwords / elapsed_time\n\n test_loss = 1 # evaluate(model, test_data, criterion, bptt, ntokens)\n print(\"=\" * 89)\n print(\n \"| end of training | test loss {:5.2f} \\n| time: {:5.2f}s | words: {:3d} | wps: {:5.2f}\".format(\n test_loss, elapsed_time, nwords, wps\n )\n )\n print(\"=\" * 110)\n\n if can_benchmark and len(model.balance) == 4:\n # Assert that words per second is within 3 standard deviations of the average\n # of six golden runs\n assert wps > 36954.4 - (3 * 116.825)\n\n print(\"Peak allocated bytes on cuda:0: {:1d}\".format(torch.cuda.memory_stats(0)[\"allocated_bytes.all.peak\"]))\n print(\"Peak allocated bytes on cuda:1: {:1d}\".format(torch.cuda.memory_stats(1)[\"allocated_bytes.all.peak\"]))\n print(\"Peak allocated bytes on cuda:2: {:1d}\".format(torch.cuda.memory_stats(2)[\"allocated_bytes.all.peak\"]))\n print(\"Peak allocated bytes on cuda:3: {:1d}\".format(torch.cuda.memory_stats(3)[\"allocated_bytes.all.peak\"]))\n\n # Assert that memory usage on each GPU is within 10% of golden run\n # Right-hand-side is golden run bytes * 110%\n assert torch.cuda.memory_stats(0)[\"allocated_bytes.all.peak\"] < 4061909504 * 1.1\n assert torch.cuda.memory_stats(1)[\"allocated_bytes.all.peak\"] < 4050944 * 1.1\n assert torch.cuda.memory_stats(2)[\"allocated_bytes.all.peak\"] < 10427392 * 1.1\n assert 
torch.cuda.memory_stats(3)[\"allocated_bytes.all.peak\"] < 2031824896 * 1.1\n print(\"No regression detected\")\n\n\ndef generate_balance_weighted(num_devices, num_layers, fraction=0.5):\n balance = []\n layers_assigned = 0\n average_count = num_layers / num_devices\n last_layers = int(average_count * fraction)\n\n balance = generate_balance(num_devices - 1, num_layers - last_layers)\n balance.append(last_layers)\n return balance\n\n\ndef generate_balance(num_devices, num_layers):\n balance = []\n layers_assigned = 0\n for i in range(num_devices):\n x = (num_layers - layers_assigned) / (num_devices - i)\n if x.is_integer():\n balance.append(int(x))\n layers_assigned += x\n else:\n balance.append(math.ceil(x))\n layers_assigned += math.ceil(x)\n return balance\n\n\ndef make_model_and_data(args, device, new_data: bool = True):\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if new_data:\n vocab_size = 10000\n model, criterion, optimizer, scaler = make_model(args, device, vocab_size)\n lm_dataset = BenchmarkLMDataset()\n lm_dataloader = DataLoader(\n lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm\n )\n return {\n \"model\": model,\n \"criterion\": criterion,\n \"optimizer\": optimizer,\n \"data\": lm_dataloader,\n \"vocab_size\": vocab_size,\n }\n else:\n data = get_data(device)\n ntokens, train_data, val_data, test_data = data\n model, criterion, optimizer, scaler = make_model(args, device, ntokens)\n return {\n \"model\": model,\n \"criterion\": criterion,\n \"optimizer\": optimizer,\n \"data\": data,\n }\n\n\ndef bench_single_process(args):\n num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1\n assert num_devices > 0\n init_random_seed(0)\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n new_data = True\n\n blob = make_model_and_data(args, None, new_data=new_data)\n model = blob[\"model\"]\n\n balance = generate_balance(min(num_devices, 4), len(model))\n p = pipe.Pipe(\n model, balance, chunks=args.chunks, pipelined_backward=args.pipelined_backward, checkpoint=args.checkpoint\n )\n del model\n del blob[\"model\"]\n\n if new_data:\n train(blob[\"data\"], p, blob[\"criterion\"], blob[\"optimizer\"], blob[\"vocab_size\"], args)\n else:\n ntokens, train_data, val_data, test_data = blob[\"data\"]\n benchmark_language_model(train_data, val_data, test_data, p, criterion, optimizer, ntokens, args)\n\n\ndef run_mp_worker(args, available_workers):\n new_data = True\n\n blob = make_model_and_data(args, None, new_data=new_data)\n model = blob[\"model\"]\n\n balance = generate_balance_weighted(get_pipeline_parallel_group().size(), len(model), 0.8)\n p = pipe.Pipe(\n model,\n balance,\n style=Pipe.AsyncSchedule,\n chunks=args.chunks,\n worker_map=get_worker_map(),\n input_device=torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\"),\n pipelined_backward=args.pipelined_backward,\n checkpoint=args.checkpoint,\n # loss_fn=blob[\"criterion\"],\n )\n if torch.cuda.is_available():\n p = p.cuda()\n if args.all_at_once and p.pipeline:\n print(f\"running all at once\")\n p.pipeline.all_at_once = True\n\n if new_data:\n train(blob[\"data\"], p, blob[\"criterion\"], blob[\"optimizer\"], blob[\"vocab_size\"], args)\n else:\n ntokens, train_data, val_data, test_data = blob[\"data\"]\n benchmark_language_model(train_data, val_data, test_data, p, criterion, optimizer, ntokens, args)\n\n\ndef run_worker(rank, world_size, args):\n 
if args.world_size != 0:\n world_size = args.world_size\n dist_init(rank + args.rank_base, world_size, hostname=args.host)\n initialize_model_parallel(1, world_size)\n init_random_seed(0)\n run_mp_worker(args, world_size)\n\n rpc.shutdown()\n torch.distributed.destroy_process_group()\n\n\ndef bench_multi_process(args, all_at_once=False):\n if args.local_world_size != 0:\n world_size = args.local_world_size\n else:\n world_size = min(torch.cuda.device_count(), 2)\n mp.spawn(run_worker, args=(world_size, args), nprocs=world_size, join=True)\n\n\nbest_device_map = {\n 0: \"mlx5_0:1\",\n 1: \"mlx5_0:1\",\n 2: \"mlx5_1:1\",\n 3: \"mlx5_1:1\",\n 4: \"mlx5_2:1\",\n 5: \"mlx5_2:1\",\n 6: \"mlx5_3:1\",\n 7: \"mlx5_3:1\",\n}\n\n\ndef bench_mpi(args):\n guess_rank = int(os.environ[\"OMPI_COMM_WORLD_RANK\"])\n world_size = int(os.environ[\"OMPI_COMM_WORLD_SIZE\"])\n local_rank = int(os.environ[\"OMPI_COMM_WORLD_LOCAL_RANK\"])\n os.environ[\"UCX_NET_DEVICES\"] = best_device_map[local_rank]\n\n os.environ[\"MASTER_ADDR\"] = args.host\n os.environ[\"MASTER_PORT\"] = \"10638\"\n if args.socket_name:\n os.environ[\"GLOO_SOCKET_IFNAME\"] = args.socket_name\n os.environ[\"TP_SOCKET_IFNAME\"] = args.socket_name\n\n torch.distributed.init_process_group(backend=\"gloo\", rank=guess_rank, world_size=world_size)\n\n os.environ[\"MASTER_ADDR\"] = args.host\n os.environ[\"MASTER_PORT\"] = \"10639\"\n init_method = f\"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}\"\n rank = torch.distributed.get_rank()\n world_size = torch.distributed.get_world_size()\n torch.cuda.set_device(local_rank % torch.cuda.device_count())\n\n rpc.init_rpc(\n f\"Test{rank}\",\n rank=rank,\n world_size=world_size,\n backend=rpc.BackendType.PROCESS_GROUP,\n rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(rpc_timeout=20, init_method=init_method),\n )\n\n backends = {\"model_parallel_backend\": \"nccl\", \"pipeline_backend\": \"mpi\", \"ddp_backend\": \"nccl\"}\n\n if args.ddp_zero:\n initialize_model_parallel(1, 4, **backends)\n else:\n initialize_model_parallel(1, world_size, **backends)\n init_random_seed(0)\n\n run_mp_worker(args, world_size)\n\n rpc.shutdown()\n torch.distributed.destroy_process_group()\n\n\nparser = argparse.ArgumentParser(description=\"benchmark\")\nparser.add_argument(\"--local-world-size\", \"-l\", type=int, default=0, help=\"local world size\")\nparser.add_argument(\"--world-size\", \"-w\", type=int, default=0, help=\"world size\")\nparser.add_argument(\"--rank-base\", \"-r\", type=int, help=\"rank base\", default=0)\nparser.add_argument(\"--host\", \"-o\", type=str, default=\"localhost\", help=\"hostname\")\nparser.add_argument(\"--no-mpi\", action=\"store_true\", default=False, help=\"disable mpi\")\nparser.add_argument(\"--chunks\", type=int, default=1, help=\"number of microbatches per batch\")\nparser.add_argument(\"--batch-size\", type=int, default=8, help=\"size of a batch\")\nparser.add_argument(\"--all-at-once\", action=\"store_true\", default=False, help=\"do backward pass on whole batch at once\")\nparser.add_argument(\"--max-batch\", type=int, default=4, help=\"Max number of batches\")\nparser.add_argument(\"--socket-name\", type=str, default=None, help=\"socket ifname for gloo/tp\")\nparser.add_argument(\"--num-decoder-layers\", type=int, default=10, help=\"Number of decoder layers in the model\")\nparser.add_argument(\"--ddp-zero\", action=\"store_true\", default=False, help=\"enable ddp\")\nparser.add_argument(\n \"--lazy-construction\", action=\"store_true\", default=False, 
help=\"Number of decoder layers in the model\"\n)\nparser.add_argument(\n \"--checkpoint\", default=\"never\", choices=[\"always\", \"except_last\", \"never\"], help=\"Checkpointing strategy for pipe\"\n)\nparser.add_argument(\n \"--pipelined-backward\", dest=\"pipelined_backward\", action=\"store_true\", help=\"Pipelined backward pass\"\n)\nparser.add_argument(\n \"--no-pipelined-backward\", dest=\"pipelined_backward\", action=\"store_false\", help=\"Pipelined backward pass\"\n)\nparser.set_defaults(pipelined_backward=True)\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n # bench_multi_process(args, all_at_once=True)\n if args.no_mpi or \"OMPI_COMM_WORLD_RANK\" not in os.environ:\n print(f\"Running benchmark with args: {args}\")\n bench_single_process(args)\n else:\n if os.environ[\"OMPI_COMM_WORLD_RANK\"] == \"0\":\n print(f\"Running benchmark with args: {args}\")\n bench_mpi(args)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import Dict\n\nimport torch\nfrom torch.cuda.amp import GradScaler as TorchGradScaler\nimport torch.distributed as dist\nfrom torch.optim import Optimizer\n\nfrom .oss import OSS\n\n\nclass GradScaler(TorchGradScaler):\n def _unscale_grads_(\n self, optimizer: Optimizer, inv_scale: torch.Tensor, found_inf: torch.Tensor, allow_fp16: bool\n ) -> Dict[torch.device, torch.Tensor]:\n return super()._unscale_grads_(optimizer, inv_scale, found_inf, True)\n\n\nclass ShardedGradScaler(TorchGradScaler):\n \"\"\"\n A shard-aware :class:`GradScaler<torch.cuda.amp.GradScaler>`, to be used in conjunction with\n :class:`OSS` and :class:`ShardedOptimizer`.\n\n Interface and usecases are not changed, more explanations can be found in the corresponding pytorch\n documentation https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.display_warning = True\n\n def unscale_(self, optimizer: Optimizer) -> None:\n # Could be a mistake, this scaler is supposed to work with ZeroRedundancyOptimizer only\n if self.display_warning and not isinstance(optimizer, OSS):\n logging.warning(\n \"ShardedGradScaler is to be used in combination with a sharded optimizer, this could not be checked\"\n )\n\n self.display_warning = False # Only warn once\n\n # Call the upstream unscale_ method which will only act on this rank's gradients\n super().unscale_(optimizer)\n\n # Synchronize the detected inf across the ranks\n optimizer_state = self._per_optimizer_states[id(optimizer)]\n handles = [dist.all_reduce(v, async_op=True) for v in optimizer_state[\"found_inf_per_device\"].values()]\n\n # Make sure that the calls are done before moving out\n _ = list(map(lambda x: x.wait(), handles))\n" ]
[ [ "torch.multiprocessing.spawn", "torch.zeros", "torch.sin", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.distributed.get_rank", "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.ones", "torch.distributed.init_process_group", "torch.distributed.rpc.ProcessGroupRpcBackendOptions", "torch.distributed.barrier", "torch.cuda.memory_stats", "torch.arange", "torch.cos", "torch.cuda.current_device", "torch.distributed.rpc.shutdown", "torch.distributed.destroy_process_group", "torch.distributed.get_world_size", "torch.cuda.device_count", "numpy.random.seed", "torch.cuda.manual_seed", "torch.Tensor", "torch.manual_seed", "torch.distributed.all_reduce" ], [ "torch.distributed.all_reduce" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
konstmish/opt_methods
[ "ae73d9bd89ae5c463e70328d73cbd190175df98c" ]
[ "loss_functions/loss_oracle.py" ]
[ "import copy\nimport numpy as np\nimport warnings\n\nfrom .regularizer import Regularizer\n \n\nclass Oracle():\n \"\"\"\n Base class for all objectives. Can provide objective values,\n gradients and its Hessians as functions that take parameters as input.\n Takes as input the values of l1 and l2 regularization.\n \"\"\"\n def __init__(self, l1=0, l2=0, l2_in_prox=False, regularizer=None, seed=42):\n if l1 < 0.0:\n raise ValueError(\"Invalid value for l1 regularization: {}\".format(l1))\n if l2 < 0.0:\n raise ValueError(\"Invalid value for l2 regularization: {}\".format(l2))\n if l2 == 0. and l2_in_prox:\n warnings.warn(\"The value of l2 is set to 0, so l2_in_prox is changed to False.\")\n l2_in_prox = False\n self.l1 = l1\n self.l2 = 0 if l2_in_prox else l2\n self.l2_in_prox = l2_in_prox\n self.x_opt = None\n self.f_opt = np.inf\n self.regularizer = regularizer\n self.seed = seed\n \n if (l1 > 0 or l2_in_prox) and regularizer is None:\n l2_prox = l2 if l2_in_prox else 0\n self.regularizer = Regularizer(l1=l1, l2=l2_prox)\n self.rng = np.random.default_rng(seed)\n self._smoothness = None\n self._max_smoothness = None\n self._ave_smoothness = None\n self._importance_probs = None\n self._individ_smoothness = None\n \n def value(self, x):\n value = self._value(x)\n if self.regularizer is not None:\n value += self.regularizer(x)\n if value < self.f_opt:\n self.x_opt = copy.deepcopy(x)\n self.f_opt = value\n return value\n \n def gradient(self, x):\n pass\n \n def hessian(self, x):\n pass\n \n def hess_vec_prod(self, x, v, grad_dif=False, eps=None):\n pass\n \n @property\n def smoothness(self):\n pass\n \n @property\n def max_smoothness(self):\n pass\n \n @property\n def average_smoothness(self):\n pass\n\n def batch_smoothness(self, batch_size):\n pass\n \n @staticmethod\n def norm(x):\n pass\n \n @staticmethod\n def inner_prod(x, y):\n pass\n \n @staticmethod\n def outer_prod(x, y):\n pass\n \n @staticmethod\n def is_equal(x, y):\n pass\n" ]
[ [ "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhongwen/jax
[ "76d2a87915863d3a32732837cc7bf61b7b2f9e5b" ]
[ "tests/lax_numpy_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nfrom functools import partial\nimport itertools\nimport operator\nimport unittest\nfrom unittest import SkipTest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport six\n\nimport numpy as onp\n\nimport jax.ops\nfrom jax import api\nfrom jax import lax\nfrom jax import numpy as lnp\nfrom jax import test_util as jtu\nfrom jax.lib import xla_bridge\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n\nnonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]\nnonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes\nempty_array_shapes = [(0,), (0, 4), (3, 0),]\n\nscalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]\narray_shapes = nonempty_array_shapes + empty_array_shapes\nnonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes\nnonempty_shapes = scalar_shapes + nonempty_array_shapes\nall_shapes = scalar_shapes + array_shapes\n\nfloat_dtypes = [onp.float32, onp.float64]\ncomplex_dtypes = [onp.complex64, onp.complex128]\nint_dtypes = [onp.int32, onp.int64]\nunsigned_dtypes = [onp.uint32, onp.uint64]\nbool_dtypes = [onp.bool_]\ndefault_dtypes = float_dtypes + int_dtypes\ninexact_dtypes = float_dtypes + complex_dtypes\nnumber_dtypes = float_dtypes + complex_dtypes + int_dtypes\nall_dtypes = number_dtypes + bool_dtypes\n\nOpRecord = collections.namedtuple(\n \"OpRecord\",\n [\"name\", \"nargs\", \"dtypes\", \"shapes\", \"rng\", \"diff_modes\", \"test_name\",\n \"check_dtypes\"])\n\n\ndef op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None,\n check_dtypes=True):\n test_name = test_name or name\n return OpRecord(name, nargs, dtypes, shapes, rng, diff_modes, test_name,\n check_dtypes)\n\nJAX_ONE_TO_ONE_OP_RECORDS = [\n op_record(\"abs\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"add\", 2, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"ceil\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"conj\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"equal\", 2, all_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"exp\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"fabs\", 1, float_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"float_power\", 2, inexact_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"floor\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"greater\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"greater_equal\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"less\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"less_equal\", 
2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"log\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"logical_and\", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),\n op_record(\"logical_not\", 1, all_dtypes, all_shapes, jtu.rand_bool(), []),\n op_record(\"logical_or\", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),\n op_record(\"logical_xor\", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),\n op_record(\"maximum\", 2, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"minimum\", 2, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"multiply\", 2, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"negative\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"not_equal\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), [\"rev\"]),\n op_record(\"array_equal\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), [\"rev\"]),\n op_record(\"reciprocal\", 1, inexact_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"subtract\", 2, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"sin\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"cos\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"tan\", 1, number_dtypes, all_shapes, jtu.rand_uniform(-1.5, 1.5),\n [\"rev\"]),\n op_record(\"sinh\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"cosh\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"tanh\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"arcsin\", 1, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n op_record(\"arccos\", 1, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n op_record(\"arctan\", 1, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n op_record(\"arctan2\", 2, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n op_record(\"arcsinh\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"arccosh\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"arctanh\", 1, number_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n]\n\nJAX_COMPOUND_OP_RECORDS = [\n # angle has inconsistent 32/64-bit return types across numpy versions.\n op_record(\"angle\", 1, number_dtypes, all_shapes, jtu.rand_default(), [],\n check_dtypes=False),\n op_record(\"atleast_1d\", 1, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"atleast_2d\", 1, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"atleast_3d\", 1, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"cbrt\", 1, default_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"conjugate\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"deg2rad\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"divide\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), [\"rev\"]),\n op_record(\"exp2\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"expm1\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [],\n test_name=\"expm1_large\"),\n op_record(\"expm1\", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),\n op_record(\"fix\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"floor_divide\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), [\"rev\"]),\n op_record(\"heaviside\", 2, default_dtypes, all_shapes, 
jtu.rand_default(), []),\n op_record(\"hypot\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"kron\", 2, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n op_record(\"outer\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"imag\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"iscomplex\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"isfinite\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isinf\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isnan\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isneginf\", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isposinf\", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isreal\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"isrealobj\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"log2\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"log10\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"log1p\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [],\n test_name=\"log1p_large\"),\n op_record(\"log1p\", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),\n op_record(\"logaddexp\", 2, float_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"logaddexp2\", 2, float_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"polyval\", 2, number_dtypes, nonempty_nonscalar_array_shapes, jtu.rand_default(), []),\n op_record(\"positive\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"power\", 2, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"rad2deg\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"ravel\", 1, all_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"real\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"remainder\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"mod\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"sinc\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"square\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"sqrt\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"transpose\", 1, all_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"true_divide\", 2, all_dtypes, all_shapes, jtu.rand_nonzero(), [\"rev\"]),\n op_record(\"where\", 3, (onp.float32, onp.int64), all_shapes, jtu.rand_some_zero(), []),\n op_record(\"diff\", 1, number_dtypes, nonzerodim_shapes, jtu.rand_default(), [\"rev\"]),\n]\n\nJAX_BITWISE_OP_RECORDS = [\n op_record(\"bitwise_and\", 2, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool(), []),\n op_record(\"bitwise_not\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool(), []),\n op_record(\"bitwise_or\", 2, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool(), []),\n op_record(\"bitwise_xor\", 2, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool(), []),\n]\n\nJAX_REDUCER_RECORDS = [\n op_record(\"mean\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n op_record(\"prod\", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),\n op_record(\"sum\", 1, number_dtypes, all_shapes, jtu.rand_default(), 
[]),\n op_record(\"var\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n op_record(\"std\", 1, inexact_dtypes, nonempty_shapes, jtu.rand_default(), []),\n]\n\nJAX_REDUCER_NO_DTYPE_RECORDS = [\n op_record(\"all\", 1, all_dtypes, all_shapes, jtu.rand_some_zero(), []),\n op_record(\"any\", 1, all_dtypes, all_shapes, jtu.rand_some_zero(), []),\n op_record(\"max\", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),\n op_record(\"min\", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),\n]\n\nJAX_ARGMINMAX_RECORDS = [\n op_record(\"argmin\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),\n op_record(\"argmax\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),\n]\n\nJAX_OPERATOR_OVERLOADS = [\n op_record(\"__add__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__sub__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__mul__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__eq__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__ne__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__lt__\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__gt__\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__ge__\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__neg__\", 1, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__pow__\", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),\n op_record(\"__mod__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__floordiv__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__truediv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__abs__\", 1, number_dtypes, all_shapes, jtu.rand_default(), []),\n # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2\n op_record(\"__invert__\", 1, int_dtypes, all_shapes, jtu.rand_default(), []),\n # TODO(mattjj): investigate these failures\n # op_record(\"__or__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n # op_record(\"__and__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n # op_record(\"__xor__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n # op_record(\"__divmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n # TODO(mattjj): lshift, rshift\n]\n\nJAX_RIGHT_OPERATOR_OVERLOADS = [\n op_record(\"__radd__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__rsub__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__rmul__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__rpow__\", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),\n op_record(\"__rmod__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__rfloordiv__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__rtruediv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n # op_record(\"__ror__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n # op_record(\"__rand__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n # op_record(\"__rxor__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n # op_record(\"__rdivmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n]\n\nnumpy_version = tuple(map(int, onp.version.version.split('.')))\nif numpy_version >= (1, 15):\n JAX_COMPOUND_OP_RECORDS += [\n op_record(\"isclose\", 2, all_dtypes, 
all_shapes, jtu.rand_small_positive(), []),\n op_record(\"gcd\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"lcm\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\n ]\n JAX_REDUCER_NO_DTYPE_RECORDS += [\n op_record(\"ptp\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n ]\n\nif six.PY2:\n JAX_OPERATOR_OVERLOADS += [\n op_record(\"__div__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n ]\n JAX_RIGHT_OPERATOR_OVERLOADS += [\n op_record(\"__rdiv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n ]\n\n\nCombosWithReplacement = itertools.combinations_with_replacement\n\n\ndef _dtypes_are_compatible_for_bitwise_ops(args):\n if len(args) <= 1:\n return True\n is_signed = lambda dtype: onp.issubdtype(dtype, onp.signedinteger)\n width = lambda dtype: onp.iinfo(dtype).bits\n x, y = args\n if width(x) > width(y):\n x, y = y, x\n # The following condition seems a little ad hoc, but seems to capture what\n # numpy actually implements.\n return (\n is_signed(x) == is_signed(y)\n or (width(x) == 32 and width(y) == 32)\n or (width(x) == 32 and width(y) == 64 and is_signed(y)))\n\ndef _shapes_are_broadcast_compatible(shapes):\n accumulator = onp.zeros([])\n for shape in shapes:\n try:\n accumulator = accumulator + onp.zeros(shape)\n except ValueError:\n return False\n return True\n\n\nclass LaxBackedNumpyTests(jtu.JaxTestCase):\n \"\"\"Tests for LAX-backed Numpy implementation.\"\"\"\n\n def _GetArgsMaker(self, rng, shapes, dtypes):\n return lambda: [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n dtypes),\n \"rng\": rec.rng, \"shapes\": shapes, \"dtypes\": dtypes,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name),\n \"check_dtypes\": rec.check_dtypes}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n CombosWithReplacement(rec.shapes, rec.nargs))\n for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))\n for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,\n JAX_COMPOUND_OP_RECORDS)))\n def testOp(self, onp_op, lnp_op, rng, shapes, dtypes, check_dtypes):\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n py_scalar_arg = jtu.PYTHON_SCALAR_SHAPE in shapes\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,\n check_dtypes=check_dtypes and not py_scalar_arg)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=check_dtypes)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n dtypes),\n \"rng\": rec.rng, \"shapes\": shapes, \"dtypes\": dtypes, \"name\": rec.name}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n CombosWithReplacement(rec.shapes, rec.nargs))\n for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))\n for rec in JAX_OPERATOR_OVERLOADS))\n def testOperatorOverload(self, name, rng, shapes, dtypes):\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)\n self._CompileAndCheck(fun, args_maker,\n check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n dtypes),\n \"rng\": rec.rng, \"shapes\": shapes, \"dtypes\": dtypes, \"name\": rec.name}\n for shapes in filter(\n 
_shapes_are_broadcast_compatible,\n CombosWithReplacement(rec.shapes, rec.nargs))\n for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))\n for rec in JAX_RIGHT_OPERATOR_OVERLOADS))\n def testRightOperatorOverload(self, name, rng, shapes, dtypes):\n if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:\n raise SkipTest() # TODO(mattjj): clean up\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n fun = lambda fst, snd: getattr(snd, name)(fst)\n self._CompileAndCheck(fun, args_maker,\n check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n rec.test_name, shapes, dtypes),\n \"rng\": rec.rng, \"shapes\": shapes, \"dtypes\": dtypes,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name)}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n CombosWithReplacement(rec.shapes, rec.nargs))\n for dtypes in filter(\n _dtypes_are_compatible_for_bitwise_ops,\n CombosWithReplacement(rec.dtypes, rec.nargs)))\n for rec in JAX_BITWISE_OP_RECORDS))\n def testBitwiseOp(self, onp_op, lnp_op, rng, shapes, dtypes):\n if not FLAGS.jax_enable_x64 and any(\n onp.iinfo(dtype).bits == 64 for dtype in dtypes):\n self.skipTest(\"x64 types are disabled by jax_enable_x64\")\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,\n check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}_dtype={}_keepdims={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis,\n \"None\" if out_dtype is None else onp.dtype(out_dtype).name, keepdims),\n \"rng\": rec.rng, \"shape\": shape, \"dtype\": dtype, \"out_dtype\": out_dtype,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name),\n \"axis\": axis, \"keepdims\": keepdims}\n for rec in JAX_REDUCER_RECORDS\n for shape in rec.shapes for dtype in rec.dtypes\n for out_dtype in [None] + rec.dtypes\n for axis in set(range(-len(shape), len(shape))) | set([None])\n for keepdims in [False, True]))\n def testReducer(self, onp_op, lnp_op, rng, shape, dtype, out_dtype, axis, keepdims):\n onp_fun = lambda x: onp_op(x, axis, dtype=out_dtype, keepdims=keepdims)\n lnp_fun = lambda x: lnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}_keepdims={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),\n \"rng\": rec.rng, \"shape\": shape, \"dtype\": dtype,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name),\n \"axis\": axis, \"keepdims\": keepdims}\n for rec in JAX_REDUCER_NO_DTYPE_RECORDS\n for shape in rec.shapes for dtype in rec.dtypes\n for axis in set(range(-len(shape), len(shape))) | set([None])\n for keepdims in [False, True]))\n def testReducerNoDtype(self, onp_op, lnp_op, rng, shape, dtype, axis, keepdims):\n onp_fun = lambda x: onp_op(x, axis, keepdims=keepdims)\n lnp_fun = lambda x: lnp_op(x, axis, keepdims=keepdims)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, 
args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in all_shapes for dtype in all_dtypes\n for axis in set(range(-len(shape), len(shape))) | set([None])))\n def testCountNonzero(self, shape, dtype, axis):\n rng = jtu.rand_some_zero()\n onp_fun = lambda x: onp.count_nonzero(x, axis)\n lnp_fun = lambda x: lnp.count_nonzero(x, axis)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng\": rec.rng, \"shape\": shape, \"dtype\": dtype,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name),\n \"axis\": axis}\n for rec in JAX_ARGMINMAX_RECORDS\n for shape in rec.shapes for dtype in rec.dtypes\n for axis in range(-len(shape), len(shape))))\n def testArgMinMax(self, onp_op, lnp_op, rng, shape, dtype, axis):\n if (dtype == onp.complex128 and FLAGS.jax_test_dut and\n FLAGS.jax_test_dut.startswith(\"gpu\")):\n raise unittest.SkipTest(\"complex128 reductions not supported on GPU\")\n\n def onp_fun(array_to_reduce):\n return onp_op(array_to_reduce, axis)\n\n def lnp_fun(array_to_reduce):\n return lnp_op(array_to_reduce, axis)\n\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),\n axes),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"axes\": axes, \"rng\": rng}\n for rng in [jtu.rand_default()]\n for lhs_shape, rhs_shape, axes in [\n [(2,), (2,), (-1, -1, -1, None)], # scalar output\n [(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors\n [(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors\n [(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting\n [(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes\n [(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting\n [(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors\n [(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting\n [(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing\n [(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before\n ]\n for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))\n def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n axisa, axisb, axisc, axis = axes\n lnp_fun = lambda a, b: lnp.cross(a, b, axisa, axisb, axisc, axis)\n onp_fun = lambda a, b: onp.cross(a, b, axisa, axisb, axisc, axis)\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n name,\n 
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"rng\": rng}\n for rng in [jtu.rand_default()]\n for name, lhs_shape, rhs_shape in [\n (\"matrix-scalar\", (3, 3), ()),\n (\"scalar-matrix\", (), (3, 3)),\n (\"matrix-vector\", (4, 5), (5,)),\n (\"vector-matrix\", (6,), (6, 4)),\n (\"matrix-matrix\", (3, 4), (4, 5)),\n (\"tensor-vector\", (4, 3, 2), (2,)),\n (\"vector-tensor\", (2,), (3, 2, 4)),\n (\"tensor-matrix\", (4, 3, 2), (2, 5)),\n (\"matrix-tensor\", (5, 2), (3, 2, 4)),\n (\"tensor-tensor\", (2, 3, 4), (5, 4, 1))]\n for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))\n def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n self._CheckAgainstNumpy(onp.dot, lnp.dot, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp.dot, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n name,\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"rng\": rng}\n for rng in [jtu.rand_default()]\n for name, lhs_shape, rhs_shape in [\n (\"vector-vector\", (3,), (3,)),\n (\"matrix-vector\", (3, 3), (3,)),\n (\"vector-matrix\", (3,), (3, 3)),\n (\"matrix-matrix\", (3, 3), (3, 3)),\n (\"vector-tensor\", (3,), (5, 3, 2)),\n (\"tensor-vector\", (5, 3, 2), (2,)),\n (\"matrix-tensor\", (5, 2), (3, 2, 4)),\n (\"tensor-matrix\", (5, 2, 3), (3, 2)),\n (\"tensor-tensor\", (5, 3, 4), (5, 4, 1)),\n (\"tensor-tensor-broadcast\", (3, 1, 3, 4), (5, 4, 1))]\n for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))\n def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n self._CheckAgainstNumpy(onp.matmul, lnp.matmul, args_maker,\n check_dtypes=True)\n self._CompileAndCheck(lnp.matmul, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),\n axes),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"axes\": axes, \"rng\": rng}\n for rng in [jtu.rand_default()]\n for lhs_shape, rhs_shape, axes in [\n [(2, 3, 4), (5, 6, 7), 0], # from issue #740\n [(2, 3, 4), (3, 4, 5, 6), 2],\n [(2, 3, 4), (5, 4, 3, 6), [1, 2]],\n [(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],\n [(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],\n ]\n for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))\n def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n lnp_fun = lambda a, b: lnp.tensordot(a, b, axes)\n onp_fun = lambda a, b: onp.tensordot(a, b, axes)\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n 
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"rng\": jtu.rand_default()}\n # TODO(phawkins): support integer dtypes too.\n for lhs_dtype, rhs_dtype in CombosWithReplacement(inexact_dtypes, 2)\n for lhs_shape, rhs_shape in [\n (l, r) for l, r in CombosWithReplacement(all_shapes, 2)\n if len(jtu._dims_of_shape(l)) == 0\n or len(jtu._dims_of_shape(r)) == 0\n or l[-1] == r[-1]]))\n def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n onp_fun = lambda lhs, rhs: onp.inner(lhs, rhs)\n lnp_fun = lambda lhs, rhs: lnp.inner(lhs, rhs)\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_amin={}_amax={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),\n \"shape\": shape, \"dtype\": dtype, \"a_min\": a_min, \"a_max\": a_max,\n \"rng\": jtu.rand_default()}\n for shape in all_shapes for dtype in number_dtypes\n for a_min, a_max in [(-1, None), (None, 1), (-1, 1)]))\n def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng):\n onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)\n lnp_fun = lambda x: lnp.clip(x, a_min=a_min, a_max=a_max)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_decimals={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), decimals),\n \"shape\": shape, \"dtype\": dtype, \"decimals\": decimals,\n \"rng\": jtu.rand_default()}\n for shape in all_shapes for dtype in number_dtypes\n for decimals in [0, 1, -2]))\n def testRoundStaticDecimals(self, shape, dtype, decimals, rng):\n if onp.issubdtype(dtype, onp.integer) and decimals < 0:\n self.skipTest(\"Integer rounding with decimals < 0 not implemented\")\n onp_fun = lambda x: onp.round(x, decimals=decimals)\n lnp_fun = lambda x: lnp.round(x, decimals=decimals)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_mode={}_rpadwidth={}_rconstantvalues={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), mode, pad_width_rank,\n constant_values_rank),\n \"shape\": shape, \"dtype\": dtype, \"mode\": mode,\n \"pad_width_rank\": pad_width_rank,\n \"constant_values_rank\": constant_values_rank, \"rng\": jtu.rand_default(),\n \"irng\": jtu.rand_int(3)}\n for mode, constant_values_rank, shapes in [\n ('constant', 0, all_shapes),\n ('constant', 1, all_shapes),\n ('constant', 2, all_shapes),\n ('symmetric', None, nonempty_shapes),\n ('reflect', None, nonempty_shapes),\n ('wrap', None, nonempty_shapes),\n ]\n for shape in shapes for dtype in all_dtypes\n for pad_width_rank in range(3)))\n def testPad(self, shape, dtype, mode, pad_width_rank, constant_values_rank,\n rng, irng):\n pad_width = irng([len(shape), 2][2 - pad_width_rank:], onp.int32)\n def onp_fun(x, kwargs):\n if pad_width.size == 0:\n return x\n return onp.pad(x, pad_width, mode=mode, **kwargs)\n def lnp_fun(x, kwargs):\n return 
lnp.pad(x, pad_width, mode=mode, **kwargs)\n\n def args_maker():\n kwargs = {}\n if constant_values_rank:\n kwargs[\"constant_values\"] = rng(\n [len(shape), 2][2 - constant_values_rank:], dtype)\n return rng(shape, dtype), kwargs\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape=[{}]_reps={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), reps),\n \"shape\": shape, \"dtype\": dtype, \"reps\": reps,\n \"rng\": jtu.rand_default()}\n for reps in [(), (2,), (3, 4), (2, 3, 4)]\n for dtype in default_dtypes\n for shape in all_shapes\n ))\n def testTile(self, shape, dtype, reps, rng):\n onp_fun = lambda arg: onp.tile(arg, reps)\n lnp_fun = lambda arg: lnp.tile(arg, reps)\n\n args_maker = lambda: [rng(shape, dtype)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_axis={}_baseshape=[{}]_dtypes=[{}]\".format(\n axis, \",\".join(str(d) for d in base_shape),\n \",\".join(onp.dtype(dtype).name for dtype in dtypes)),\n \"axis\": axis, \"base_shape\": base_shape, \"dtypes\": dtypes,\n \"rng\": jtu.rand_default()}\n for num_arrs in [3]\n for dtypes in CombosWithReplacement(default_dtypes, num_arrs)\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for axis in range(-len(base_shape)+1, len(base_shape))))\n def testConcatenate(self, axis, base_shape, dtypes, rng):\n wrapped_axis = axis % len(base_shape)\n shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), dtypes)]\n onp_fun = lambda *args: onp.concatenate(args, axis=axis)\n lnp_fun = lambda *args: lnp.concatenate(args, axis=axis)\n\n def args_maker():\n return [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_axis={}_baseshape=[{}]_dtypes=[{}]\".format(\n axis, \",\".join(str(d) for d in base_shape),\n \",\".join(onp.dtype(dtype).name for dtype in dtypes)),\n \"axis\": axis, \"base_shape\": base_shape, \"dtypes\": dtypes,\n \"rng\": jtu.rand_default()}\n for dtypes in CombosWithReplacement(default_dtypes, 2)\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for axis in range(-len(base_shape)+1, len(base_shape))))\n def testAppend(self, axis, base_shape, dtypes, rng):\n wrapped_axis = axis % len(base_shape)\n shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), dtypes)]\n onp_fun = lambda arr, values: onp.append(arr, values, axis=axis)\n lnp_fun = lambda arr, values: lnp.append(arr, values, axis=axis)\n\n def args_maker():\n return [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape=[{}]_axis={}_repeats={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, repeats),\n \"axis\": axis, \"shape\": shape, \"dtype\": dtype, \"repeats\": repeats,\n \"rng\": jtu.rand_default()}\n for repeats in 
[0, 1, 2]\n for dtype in default_dtypes\n for shape in all_shapes\n for axis in [None] + list(range(-len(shape), len(shape)))))\n def testRepeat(self, axis, shape, dtype, repeats, rng):\n onp_fun = lambda arg: onp.repeat(arg, repeats=repeats, axis=axis)\n lnp_fun = lambda arg: lnp.repeat(arg, repeats=repeats, axis=axis)\n\n args_maker = lambda: [rng(shape, dtype)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"op={}_shape=[{}]_axis={}_out_dtype={}\".format(\n op, jtu.format_shape_dtype_string(shape, dtype), axis, out_dtype),\n \"axis\": axis, \"shape\": shape, \"dtype\": dtype, \"out_dtype\": out_dtype,\n \"rng\": jtu.rand_default(), \"lnp_op\": getattr(lnp, op),\n \"onp_op\": getattr(onp, op)}\n for op in [\"cumsum\", \"cumprod\"]\n # TODO(phawkins): replace both type lists with default_dtypes after a\n # Jaxlib update includes\n # https://github.com/google/jax/commit/86f5d189cf563b027c3cd00eea38072c003905c8\n for dtype in [onp.float32, onp.int32]\n for out_dtype in [onp.float32, onp.int32]\n for shape in all_shapes\n for axis in [None] + list(range(-len(shape), len(shape)))))\n def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, lnp_op, rng):\n onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype)\n lnp_fun = lambda arg: lnp_op(arg, axis=axis, dtype=out_dtype)\n\n args_maker = lambda: [rng(shape, dtype)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dtype={}_m={}_n={}_k={}\".format(\n onp.dtype(dtype).name, m, n, k),\n \"m\": m, \"n\": n, \"k\": k, \"dtype\": dtype, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for n in [0, 4]\n for m in [None, 0, 1, 3, 4]\n for k in list(range(-4, 4))))\n def testTri(self, m, n, k, dtype, rng):\n onp_fun = lambda: onp.tri(n, M=m, k=k, dtype=dtype)\n lnp_fun = lambda: lnp.tri(n, M=m, k=k, dtype=dtype)\n args_maker = lambda: []\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_shape={}_k={}\".format(\n op, jtu.format_shape_dtype_string(shape, dtype), k),\n \"dtype\": dtype, \"shape\": shape, \"op\": op, \"k\": k,\n \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for shape in [shape for shape in all_shapes if len(shape) >= 2]\n for op in [\"tril\", \"triu\"]\n for k in list(range(-3, 3))))\n def testTriLU(self, dtype, shape, op, k, rng):\n onp_fun = lambda arg: getattr(onp, op)(arg, k=k)\n lnp_fun = lambda arg: getattr(lnp, op)(arg, k=k)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_k={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k),\n \"dtype\": dtype, \"shape\": shape, \"k\": k, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]\n for k in list(range(-4, 4))))\n def testDiag(self, shape, dtype, k, rng):\n onp_fun = lambda arg: onp.diag(arg, k)\n lnp_fun = lambda arg: 
lnp.diag(arg, k)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_offset={}_axis1={}_axis2={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),\n \"dtype\": dtype, \"shape\": shape, \"offset\": offset, \"axis1\": axis1,\n \"axis2\": axis2, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for shape in [shape for shape in all_shapes if len(shape) >= 2]\n for axis1 in range(-len(shape), len(shape))\n for axis2 in [a for a in range(-len(shape), len(shape))\n if a % len(shape) != axis1 % len(shape)]\n for offset in list(range(-4, 4))))\n def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng):\n onp_fun = lambda arg: onp.diagonal(arg, offset, axis1, axis2)\n lnp_fun = lambda arg: lnp.diagonal(arg, offset, axis1, axis2)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_n={}\".format(onp.dtype(dtype).name, n),\n \"dtype\": dtype, \"n\": n}\n for dtype in default_dtypes\n for n in list(range(4))))\n def testIdentity(self, n, dtype):\n onp_fun = lambda: onp.identity(n, dtype)\n lnp_fun = lambda: lnp.identity(n, dtype)\n args_maker = lambda: []\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_dtype_{}_offset={}_axis1={}_axis2={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n out_dtype, offset, axis1, axis2),\n \"dtype\": dtype, \"out_dtype\": out_dtype, \"shape\": shape, \"offset\": offset,\n \"axis1\": axis1, \"axis2\": axis2, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for out_dtype in [None] + number_dtypes\n for shape in [shape for shape in all_shapes if len(shape) >= 2]\n for axis1 in range(-len(shape), len(shape))\n for axis2 in range(-len(shape), len(shape))\n if (axis1 % len(shape)) != (axis2 % len(shape))\n for offset in list(range(-4, 4))))\n def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng):\n onp_fun = lambda arg: onp.trace(arg, offset, axis1, axis2, out_dtype)\n lnp_fun = lambda arg: lnp.trace(arg, offset, axis1, axis2, out_dtype)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_test_name_suffix(\"\", [shape] * len(dtypes), dtypes), axis),\n \"shape\": shape, \"axis\": axis, \"dtypes\": dtypes, \"rng\": rng}\n for dtypes in [\n [onp.float32],\n [onp.float32, onp.float32],\n [onp.float32, onp.int32, onp.float32],\n [onp.float32, onp.int64, onp.float32],\n [onp.float32, onp.int32, onp.float64],\n ]\n for shape in [(), (2,), (3, 4), (1, 100)]\n for axis in range(-len(shape), len(shape) + 1)\n for rng in [jtu.rand_default()]))\n def testStack(self, shape, axis, dtypes, rng):\n args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]\n onp_fun = partial(onp.stack, axis=axis)\n lnp_fun = partial(lnp.stack, axis=axis)\n 
self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_{}\".format(\n op, jtu.format_test_name_suffix(\"\", [shape] * len(dtypes), dtypes)),\n \"shape\": shape, \"op\": op, \"dtypes\": dtypes, \"rng\": rng}\n for op in [\"hstack\", \"vstack\", \"dstack\"]\n for dtypes in [\n [onp.float32],\n [onp.float32, onp.float32],\n [onp.float32, onp.int32, onp.float32],\n [onp.float32, onp.int64, onp.float32],\n [onp.float32, onp.int32, onp.float64],\n ]\n for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]\n for rng in [jtu.rand_default()]))\n def testHVDStack(self, shape, op, dtypes, rng):\n args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]\n onp_fun = getattr(onp, op)\n lnp_fun = getattr(lnp, op)\n self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outdtype={}\".format(\n jtu.format_shape_dtype_string(shape, fill_value_dtype),\n onp.dtype(out_dtype).name if out_dtype else \"None\"),\n \"shape\": shape, \"fill_value_dtype\": fill_value_dtype,\n \"out_dtype\": out_dtype, \"rng\": jtu.rand_default()}\n for shape in array_shapes\n for fill_value_dtype in default_dtypes\n for out_dtype in [None] + default_dtypes))\n def testFull(self, shape, fill_value_dtype, out_dtype, rng):\n onp_fun = lambda fill_value: onp.full(shape, fill_value, dtype=out_dtype)\n lnp_fun = lambda fill_value: lnp.full(shape, fill_value, dtype=out_dtype)\n args_maker = lambda: [rng((), fill_value_dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_filldtype={}_outdtype={}\".format(\n jtu.format_shape_dtype_string(shape, in_dtype),\n onp.dtype(fill_value_dtype).name,\n onp.dtype(out_dtype).name),\n \"shape\": shape, \"in_dtype\": in_dtype,\n \"fill_value_dtype\": fill_value_dtype, \"out_dtype\": out_dtype,\n \"rng\": jtu.rand_default()}\n for shape in array_shapes\n for in_dtype in default_dtypes\n for fill_value_dtype in default_dtypes\n for out_dtype in default_dtypes))\n def testFullLike(self, shape, in_dtype, fill_value_dtype, out_dtype, rng):\n onp_fun = lambda x, fill_value: onp.full_like(x, fill_value, dtype=out_dtype)\n lnp_fun = lambda x, fill_value: lnp.full_like(x, fill_value, dtype=out_dtype)\n args_maker = lambda: [rng(shape, in_dtype), rng((), fill_value_dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_{}sections\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),\n \"shape\": shape, \"num_sections\": num_sections, \"axis\": axis,\n \"dtype\": dtype, \"rng\": jtu.rand_default()}\n for shape, axis, num_sections in [\n ((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),\n ((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]\n for dtype in default_dtypes))\n def testSplitStaticInt(self, shape, num_sections, axis, dtype, rng):\n onp_fun = lambda x: onp.split(x, num_sections, axis=axis)\n lnp_fun = lambda x: lnp.split(x, num_sections, axis=axis)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, 
args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_{}sections\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),\n \"shape\": shape, \"num_sections\": num_sections, \"axis\": axis,\n \"dtype\": dtype, \"rng\": jtu.rand_default()}\n for shape, axis, num_sections in [\n ((12, 4), 0, 4), ((12, 4), 1, 2),\n ((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]\n for dtype in default_dtypes))\n def testHVDSplit(self, shape, num_sections, axis, dtype, rng):\n def fn(module, axis):\n if axis == 0:\n return module.vsplit\n elif axis == 1:\n return module.hsplit\n else:\n assert axis == 2\n return module.dsplit\n\n onp_fun = lambda x: fn(onp, axis)(x, num_sections)\n lnp_fun = lambda x: fn(lnp, axis)(x, num_sections)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}_order={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype),\n order),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype,\n \"order\": order, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for order in [\"C\", \"F\"]\n for arg_shape, out_shape in [\n (jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),\n ((), (1, 1, 1)),\n ((7, 0), (0, 42, 101)),\n ((3, 4), 12),\n ((3, 4), (12,)),\n ((3, 4), -1),\n ((2, 1, 4), (-1,)),\n ((2, 2, 4), (2, 8))\n ]))\n def testReshape(self, arg_shape, out_shape, dtype, order, rng):\n onp_fun = lambda x: onp.reshape(x, out_shape, order=order)\n lnp_fun = lambda x: lnp.reshape(x, out_shape, order=order)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype)),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype,\n \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for arg_shape, out_shape in [\n ((7, 0), (0, 42, 101)),\n ((2, 1, 4), (-1,)),\n ((2, 2, 4), (2, 8))\n ]))\n def testReshapeMethod(self, arg_shape, out_shape, dtype, rng):\n onp_fun = lambda x: onp.reshape(x, out_shape)\n lnp_fun = lambda x: x.reshape(*out_shape)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_expanddim={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype), dim),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"dim\": dim,\n \"rng\": jtu.rand_default()}\n for arg_shape in [(), (3,), (3, 4)]\n for dtype in default_dtypes\n for dim in range(-len(arg_shape)+1, len(arg_shape))))\n def testExpandDimsStaticDim(self, arg_shape, dtype, dim, rng):\n onp_fun = lambda x: onp.expand_dims(x, dim)\n lnp_fun = lambda x: lnp.expand_dims(x, dim)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n 
@parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_axes=({},{})\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"ax1\": ax1, \"ax2\": ax2,\n \"rng\": jtu.rand_default()}\n for arg_shape, ax1, ax2 in [\n ((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),\n ((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]\n for dtype in default_dtypes))\n def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2, rng):\n onp_fun = lambda x: onp.swapaxes(x, ax1, ax2)\n lnp_fun = lambda x: lnp.swapaxes(x, ax1, ax2)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_axis={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype), ax),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"ax\": ax,\n \"rng\": jtu.rand_default()}\n for arg_shape, ax in [\n ((3, 1), None),\n ((3, 1), 1),\n ((1, 3, 1), (0, 2)),\n ((1, 4, 1), (0,))]\n for dtype in default_dtypes))\n def testSqueeze(self, arg_shape, dtype, ax, rng):\n onp_fun = lambda x: onp.squeeze(x, ax)\n lnp_fun = lambda x: lnp.squeeze(x, ax)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}_weights={}_returned={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n axis,\n (None if weights_shape == None else jtu.format_shape_dtype_string(weights_shape, dtype)),\n returned),\n \"rng\": jtu.rand_default(), \"shape\": shape, \"dtype\": dtype, \"axis\": axis,\n \"weights_shape\": weights_shape, \"returned\": returned}\n for shape in nonempty_shapes\n for dtype in number_dtypes\n for axis in set(range(-len(shape), len(shape))) | set([None])\n # `weights_shape` is either `None`, same as the averaged axis, or same as\n # that of the input\n for weights_shape in ([None, shape] if axis is None else [None, (shape[axis],), shape])\n for returned in [False, True]))\n def testAverage(self, shape, dtype, axis, weights_shape, returned, rng):\n onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)\n lnp_fun = lambda x, weights: lnp.average(x, axis, weights, returned)\n args_maker = lambda: [rng(shape, dtype),\n None if weights_shape is None else rng(weights_shape, dtype)]\n\n try:\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n except ZeroDivisionError:\n self.skipTest(\"don't support checking for ZeroDivisionError\")\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_arg{}\".format(i), \"arg\": arg}\n for i, arg in enumerate([\n 3., [1, 2, 3], [1., 2., 3.],\n [[1, 2], [3, 4], [5, 6]], [[1, 2.], [3, 4], [5, 6]],\n [[3, onp.array(2), 1], onp.arange(3.)],\n ])))\n def testArray(self, arg):\n args_maker = lambda: [arg]\n self._CheckAgainstNumpy(onp.array, lnp.array, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp.array, args_maker, check_dtypes=True)\n\n def testIssue121(self):\n assert not onp.isscalar(lnp.array(3))\n\n def testArrayMethod(self):\n class arraylike(object):\n dtype = onp.float32\n def __array__(self, dtype=None):\n return 3.\n a = arraylike()\n ans = 
lnp.array(a)\n assert ans == 3.\n\n def testAllClose(self):\n rng = onp.random.RandomState(0)\n x = rng.randn(2, 2)\n y = rng.randn(2)\n\n def same(list1, list2):\n allclose = functools.partial(lnp.allclose, atol=1e-3, rtol=1e-3)\n elements_close = list(map(allclose, list1, list2))\n return lnp.all(lnp.array(elements_close))\n\n csame = api.jit(same)\n\n a1 = same((x, y), (x, y))\n a2 = csame((x, y), (x, y))\n a3 = csame((x, y), (x, 2 * y))\n\n self.assertTrue(a1)\n self.assertTrue(a2)\n self.assertFalse(a3)\n\n @jtu.skip_on_devices(\"tpu\") # TODO(mattjj): investigate this failure\n def testOnesBroadcastingConstantHandler(self):\n # TODO(mattjj): update this test for jax3\n self.skipTest(\"test needs jax3 update\")\n\n def fun(x):\n ones = lnp.ones((3, 4))\n assert isinstance(ones, onp.ndarray) and ones.strides == (0, 0)\n\n # To check that the constant handler generates a Broadcast for stride-zero\n # arrays, we monkey-patch the client instance.\n # TODO(mattjj): once we have better HLO dumping and inspecting facilities,\n # we can check the HLO more directly.\n c = x._node.c\n Broadcast = c.Broadcast # pylint: disable=invalid-name\n was_called = []\n c.Broadcast = lambda *args: was_called.append(True) or Broadcast(*args)\n out = x + ones # the ndarray constant handler should call Broadcast here\n assert was_called, \"Broadcast was not called.\"\n\n return out\n\n fun = api.jit(fun)\n out_val = fun(lnp.ones(4))\n self.assertAllClose(out_val, onp.full((3, 4), 2.), check_dtypes=False)\n\n def testZeroStridesConstantHandler(self):\n raw_const = onp.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)\n const = onp.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))\n\n def fun(x):\n return x * const\n\n fun = api.jit(fun)\n out_val = fun(3.)\n self.assertAllClose(out_val, 3. 
* const, check_dtypes=False)\n\n def testIsInstanceNdarrayDuringTracing(self):\n arr = onp.ones(3)\n\n @api.jit\n def f(x):\n self.assertIsInstance(x, lnp.ndarray)\n return lnp.sum(x)\n\n f(arr)\n\n\n def testNonArrayErrorMessage(self):\n x = [1., 2.]\n y = onp.array([3., 4.])\n\n def g(x, y):\n return lnp.add(x, y)\n\n def f(x, y):\n return lnp.dot(x, y)\n\n self.assertRaises(TypeError, lambda: g(x, y))\n self.assertRaises(TypeError, lambda: f(x, y))\n self.assertRaises(TypeError, lambda: api.jit(g)(x, y))\n self.assertRaises(TypeError, lambda: api.jit(f)(x, y))\n\n def testAbstractionErrorMessage(self):\n\n @api.jit\n def f(x, n):\n for _ in range(n):\n x = x * x\n return x\n\n self.assertRaises(TypeError, lambda: f(3., 3))\n\n @api.jit\n def g(x):\n if x > 0.:\n return x * 2\n else:\n return x + 2\n\n self.assertRaises(TypeError, lambda: g(3.))\n\n def testTracingPrimitiveWithNoTranslationErrorMessage(self):\n # TODO(mattjj): update this for jax3\n self.skipTest(\"test needs jax3 update\")\n foo = lnp._not_implemented(lambda x: x)\n\n # No error if there's no tracing.\n foo(onp.arange(3))\n\n cfoo = api.jit(foo)\n self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3)))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in [(3,), (2, 3)]\n for dtype in default_dtypes\n for axis in range(-len(shape), len(shape)) # Test negative axes\n for rng in [jtu.rand_default()]))\n def testFlip(self, shape, dtype, axis, rng):\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n lnp_op = lambda x: lnp.flip(x, axis)\n onp_op = lambda x: onp.flip(x, axis)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype}\n for shape in [(3,), (2, 3), (3, 2, 4)]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testFlipud(self, shape, dtype, rng):\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n lnp_op = lambda x: lnp.flipud(x)\n onp_op = lambda x: onp.flipud(x)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype}\n for shape in [(3, 2), (2, 3), (3, 2, 4)]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testFliplr(self, shape, dtype, rng):\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n lnp_op = lambda x: lnp.fliplr(x)\n onp_op = lambda x: onp.fliplr(x)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_k={}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k, axes),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"k\": k, \"axes\": axes}\n for shape, axes in [\n [(2, 3), (0, 1)],\n [(2, 3), (1, 0)],\n [(4, 3, 2), (0, 2)],\n [(4, 3, 2), (2, 1)],\n ]\n for k in range(-3, 4)\n for dtype in default_dtypes\n for rng in 
[jtu.rand_default()]))\n def testRot90(self, shape, dtype, k, axes, rng):\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n lnp_op = lambda x: lnp.rot90(x, k, axes)\n onp_op = lambda x: onp.rot90(x, k, axes)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n # TODO(mattjj): test infix operator overrides\n\n def testRavel(self):\n rng = onp.random.RandomState(0)\n args_maker = lambda: [rng.randn(3, 4).astype(\"float32\")]\n self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True)\n\n def testAstype(self):\n rng = onp.random.RandomState(0)\n args_maker = lambda: [rng.randn(3, 4).astype(\"float32\")]\n op = lambda x: x.astype(lnp.int32)\n self._CheckAgainstNumpy(op, op, args_maker, check_dtypes=True)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n # TODO(mattjj): test other ndarray-like method overrides\n\n def testOnpMean(self):\n # from https://github.com/google/jax/issues/125\n x = lax.add(lnp.eye(3), 0.)\n ans = onp.mean(x)\n self.assertAllClose(ans, onp.array(1./3), check_dtypes=False)\n\n def testArangeOnFloats(self):\n # from https://github.com/google/jax/issues/145\n expected = onp.arange(0.0, 1.0, 0.1)\n ans = lnp.arange(0.0, 1.0, 0.1)\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n def testSortManually(self):\n # manual tests for sort are nice because we don't have to worry about ties.\n # lax.sort is tested combinatorially.\n ans = lnp.sort(onp.array([16, 15, 23, 42, 8, 4]))\n expected = onp.array([4, 8, 15, 16, 23, 42])\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n a = onp.array([[1, 4], [3, 1]])\n ans = lnp.sort(a, axis=None)\n expected = onp.array([1, 1, 3, 4])\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n a = onp.array([[1, 4], [3, 1]])\n ans = lnp.sort(a) # last axis\n expected = onp.array([[1, 4], [1, 3]])\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n a = onp.array([[1, 4], [3, 1]])\n ans = lnp.sort(a, axis=0)\n expected = onp.array([[1, 1], [3, 4]])\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n def testArgsortManually(self):\n x = onp.array([16, 15, 23, 42, 8, 4])\n ans = lnp.argsort(x)\n expected = onp.argsort(x)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n x = onp.array([[16, 15, 23], [42, 8, 4]])\n ans = lnp.argsort(x, axis=0)\n expected = onp.argsort(x, axis=0)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n x = onp.array([[16, 15, 23], [42, 8, 4]])\n ans = lnp.argsort(x, axis=1)\n expected = onp.argsort(x, axis=1)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n x = onp.array([[16, 15, 23], [42, 8, 4]])\n ans = lnp.argsort(x, axis=None)\n expected = onp.argsort(x, axis=None)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n x = onp.array([[16, 15, 23], [42, 8, 4]])\n ans = lnp.argsort(x)\n expected = onp.argsort(x)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_shifts={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n shifts, axis),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"shifts\": shifts,\n \"axis\": axis}\n for dtype in all_dtypes\n for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]\n for shifts, axis in [\n (3, None),\n (1, 1),\n ((3,), (0,)),\n ((-2,), (-2,)),\n ((1, 2), (0, -1))\n ]\n for rng in [jtu.rand_default()]))\n def testRoll(self, shape, dtype, shifts, axis, rng):\n args_maker = lambda: 
[rng(shape, dtype)]\n lnp_op = lambda x: lnp.roll(x, shifts, axis=axis)\n onp_op = lambda x: onp.roll(x, shifts, axis=axis)\n self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_index={}_axis={}_mode={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n jtu.format_shape_dtype_string(index_shape, index_dtype),\n axis, mode),\n \"rng\": rng, \"rng_indices\": rng_indices, \"shape\": shape,\n \"index_shape\": index_shape, \"dtype\": dtype, \"index_dtype\": index_dtype,\n \"axis\": axis, \"mode\": mode}\n for shape in [(3,), (3, 4), (3, 4, 5)]\n for index_shape in scalar_shapes + [(3,), (2, 1, 3)]\n for axis in itertools.chain(range(-len(shape), len(shape)), [None])\n for dtype in all_dtypes\n for index_dtype in int_dtypes\n for mode in ['wrap', 'clip']\n for rng in [jtu.rand_default()]\n for rng_indices in [jtu.rand_int(-5, 5)]))\n def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode, rng,\n rng_indices):\n def args_maker():\n x = rng(shape, dtype)\n i = rng_indices(index_shape, index_dtype)\n return x, i\n\n lnp_op = lambda x, i: lnp.take(x, i, axis=axis, mode=mode)\n onp_op = lambda x, i: onp.take(x, i, axis=axis, mode=mode)\n self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in [(3,), (3, 4), (3, 4, 5)]\n for axis in itertools.chain(range(len(shape)), [-1], [None])\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testTakeAlongAxis(self, shape, dtype, axis, rng):\n def args_maker():\n x = rng(shape, dtype)\n i = onp.argsort(x, axis=axis)\n return x, i\n\n lnp_op = lambda x, i: lnp.take_along_axis(x, i, axis=axis)\n\n if hasattr(onp, \"take_along_axis\"):\n onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)\n self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_n={}_increasing={}\".format(\n jtu.format_shape_dtype_string([shape], dtype),\n n, increasing),\n \"dtype\": dtype, \"shape\": shape, \"n\": n, \"increasing\": increasing,\n \"rng\": jtu.rand_default()}\n for dtype in inexact_dtypes\n for shape in [0, 5]\n for n in [2, 4]\n for increasing in [False, True]))\n def testVander(self, shape, dtype, n, increasing, rng):\n onp_fun = lambda arg: onp.vander(arg, N=n, increasing=increasing)\n lnp_fun = lambda arg: lnp.vander(arg, N=n, increasing=increasing)\n args_maker = lambda: [rng([shape], dtype)]\n # np.vander seems to return float64 for all floating types. 
We could obey\n # those semantics, but they seem like a bug.\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"nan_to_num\", [shape],\n [dtype]),\n \"rng\": jtu.rand_some_inf_and_nan(), \"shape\": shape, \"dtype\": dtype}\n for shape in all_shapes\n for dtype in inexact_dtypes))\n def testNanToNum(self, rng, shape, dtype):\n dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp.nan_to_num, lnp.nan_to_num, args_maker,\n check_dtypes=True)\n self._CompileAndCheck(lnp.nan_to_num, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"ix_\", shapes, dtypes),\n \"rng\": jtu.rand_default(), \"shapes\": shapes, \"dtypes\": dtypes}\n for shapes, dtypes in (\n ((), ()),\n (((7,),), (onp.float32,)),\n (((3,), (4,)), (onp.float32, onp.int32)),\n (((3,), (0,), (4,)), (onp.int32, onp.float32, onp.int32)),\n )))\n def testIx_(self, rng, shapes, dtypes):\n args_maker = lambda: [rng(shape, dtype)\n for shape, dtype in zip(shapes, dtypes)]\n self._CheckAgainstNumpy(onp.ix_, lnp.ix_, args_maker,\n check_dtypes=True)\n self._CompileAndCheck(lnp.ix_, args_maker, check_dtypes=True)\n\n def testIssue330(self):\n x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash\n self.assertEqual(x[0, 0], 1)\n\n def testScalarDtypePromotion(self):\n # disabled this test after https://github.com/google/jax/issues/732\n msg = (\"jax.numpy differs from numpy in promotion rules for Python scalars.\"\n \" See https://github.com/google/jax/issues/732.\")\n raise SkipTest(msg)\n orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype\n jax_numpy_result = (1 + lnp.eye(1, dtype=lnp.float32)).dtype\n self.assertEqual(orig_numpy_result, jax_numpy_result)\n\n def testSymmetrizeDtypePromotion(self):\n x = onp.eye(3, dtype=onp.float32)\n orig_numpy_result = ((x + x.T) / 2).dtype\n\n x = lnp.eye(3, dtype=lnp.float32)\n jax_numpy_result = ((x + x.T) / 2).dtype\n self.assertEqual(orig_numpy_result, jax_numpy_result)\n\n def testIssue347(self):\n # https://github.com/google/jax/issues/347\n def test_fail(x):\n x = lnp.sqrt(lnp.sum(x ** 2, axis=1))\n ones = lnp.ones_like(x)\n x = lnp.where(x > 0.5, x, ones)\n return lnp.sum(x)\n\n x = lnp.array([[1, 2], [3, 4], [0, 0]], dtype=lnp.float64)\n result = api.grad(test_fail)(x)\n assert not onp.any(onp.isnan(result))\n\n def testIssue453(self):\n # https://github.com/google/jax/issues/453\n a = onp.arange(6) + 1\n ans = lnp.reshape(a, (3, 2), order='F')\n expected = onp.reshape(a, (3, 2), order='F')\n self.assertAllClose(ans, expected, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_dtype={}\".format(\n op, {bool: \"bool\", int: \"int\", float: \"float\"}[dtype]),\n \"dtype\": dtype, \"op\": op}\n for dtype in [int, float, bool]\n for op in [\"atleast_1d\", \"atleast_2d\", \"atleast_3d\"]))\n def testAtLeastNdLiterals(self, dtype, op):\n # Fixes: https://github.com/google/jax/issues/634\n onp_fun = lambda arg: getattr(onp, op)(arg)\n lnp_fun = lambda arg: getattr(lnp, op)(arg)\n args_maker = lambda: [dtype(2)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n\n def 
testLongLong(self):\n self.assertAllClose(onp.int64(7), api.jit(lambda x: x)(onp.longlong(7)),\n check_dtypes=True)\n\n def testArange(self):\n # test cases inspired by dask tests at\n # https://github.com/dask/dask/blob/master/dask/array/tests/test_creation.py#L92\n self.assertAllClose(lnp.arange(77),\n onp.arange(77), check_dtypes=True)\n self.assertAllClose(lnp.arange(2, 13),\n onp.arange(2, 13), check_dtypes=True)\n self.assertAllClose(lnp.arange(4, 21, 9),\n onp.arange(4, 21, 9), check_dtypes=True)\n self.assertAllClose(lnp.arange(53, 5, -3),\n onp.arange(53, 5, -3), check_dtypes=True)\n # TODO(mattjj): make these tests work when jax_enable_x64=True\n # self.assertAllClose(lnp.arange(77, dtype=float),\n # onp.arange(77, dtype=float), check_dtypes=True)\n # self.assertAllClose(lnp.arange(2, 13, dtype=int),\n # onp.arange(2, 13, dtype=int), check_dtypes=True)\n self.assertAllClose(lnp.arange(0, 1, -0.5),\n onp.arange(0, 1, -0.5), check_dtypes=True)\n\n self.assertRaises(TypeError, lambda: lnp.arange())\n\n # test that lnp.arange(N) doesn't instantiate an ndarray\n self.assertFalse(type(lnp.arange(77)) == type(onp.arange(77)))\n self.assertTrue(type(lnp.arange(77)) == type(lax.iota(onp.int32, 77)))\n\n def testIssue830(self):\n a = lnp.arange(4, dtype=lnp.complex64)\n self.assertEqual(a.dtype, lnp.complex64)\n\n def testIssue728(self):\n assert lnp.allclose(lnp.eye(5000), onp.eye(5000))\n self.assertEqual(0, onp.sum(lnp.eye(1050) - onp.eye(1050)))\n\n def testIssue746(self):\n lnp.arange(12).reshape(3, 4) # doesn't crash\n\n def testIssue764(self):\n x = lnp.linspace(190, 200, 4)\n f = api.grad(lambda x: lnp.sum(lnp.tanh(x)))\n # Expected values computed with autograd in float64 precision.\n expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,\n 7.66067839e-174], onp.float64)\n self.assertAllClose(f(x), expected, check_dtypes=False)\n\n def testIssue776(self):\n \"\"\"Tests that the scatter-add transpose rule instantiates symbolic zeros.\"\"\"\n def f(u):\n y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u)\n # The transpose rule for lax.tie_in returns a symbolic zero for its first\n # argument.\n return lax.tie_in(y, 7.)\n\n self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)),\n check_dtypes=True)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(op, [()], [dtype]),\n \"dtype\": dtype, \"op\": op}\n for dtype in float_dtypes\n for op in (\"sqrt\", \"arccos\", \"arcsin\", \"arctan\", \"sin\", \"cos\", \"tan\",\n \"sinh\", \"cosh\", \"tanh\", \"arccosh\", \"arcsinh\", \"arctanh\", \"exp\",\n \"log\", \"expm1\", \"log1p\")))\n def testMathSpecialFloatValues(self, op, dtype):\n onp_op = getattr(onp, op)\n lnp_op = getattr(lnp, op)\n dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type\n for x in (onp.nan, -onp.inf, -100., -2. 
-1., 0., 1., 2., 100., onp.inf,\n onp.finfo(dtype).max, onp.sqrt(onp.finfo(dtype).max),\n onp.sqrt(onp.finfo(dtype).max) * 2.):\n if onp.isnan(x) and op in (\"cosh\", \"expm1\", \"exp\"):\n # TODO(b/133842876, b/133842870): these return wrong outputs on CPU for\n # NaN inputs.\n continue\n if (op in (\"sin\", \"cos\", \"tan\", \"arctan\") and FLAGS.jax_test_dut and\n FLAGS.jax_test_dut.startswith(\"tpu\")):\n continue # TODO(b/132196789, b/134175194): fix and reenable.\n x = dtype(x)\n expected = onp_op(x)\n actual = lnp_op(x)\n self.assertAllClose(expected, actual, check_dtypes=True)\n\n def testIssue883(self):\n # from https://github.com/google/jax/issues/883\n\n @partial(api.jit, static_argnums=(1,))\n def f(x, v):\n return x\n\n x = lnp.ones((10, 10))\n v = lnp.array([1, 2, 3])\n first_call = f(x, v)\n second_call = f(x, v) # doesn't crash\n\n def testReductionOfOutOfBoundsAxis(self): # Issue 888\n x = lnp.ones((3, 4))\n self.assertRaises(ValueError, lambda: lnp.sum(x, axis=2))\n\n\nif __name__ == \"__main__\":\n absltest.main()\n" ]
[ [ "numpy.diag", "numpy.take_along_axis", "numpy.split", "numpy.expand_dims", "numpy.take", "numpy.issubdtype", "numpy.squeeze", "numpy.flipud", "numpy.dtype", "numpy.round", "numpy.concatenate", "numpy.mean", "numpy.iinfo", "numpy.tri", "numpy.cross", "numpy.trace", "numpy.roll", "numpy.swapaxes", "numpy.vander", "numpy.pad", "numpy.clip", "numpy.reshape", "numpy.arange", "numpy.eye", "numpy.fliplr", "numpy.version.version.split", "numpy.full", "numpy.finfo", "numpy.tensordot", "numpy.count_nonzero", "numpy.repeat", "numpy.zeros", "numpy.rot90", "numpy.isnan", "numpy.full_like", "numpy.int64", "numpy.append", "numpy.identity", "numpy.argsort", "numpy.array", "numpy.random.RandomState", "numpy.diagonal", "numpy.flip", "numpy.inner", "numpy.tile", "numpy.ones", "numpy.longlong", "numpy.broadcast_to", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DiogoRibeiro7/Machine-Learning
[ "d2c789851f8b4eaf74cdd0c18af072f60cd45cb3", "d2c789851f8b4eaf74cdd0c18af072f60cd45cb3", "d2c789851f8b4eaf74cdd0c18af072f60cd45cb3" ]
[ "Ensemble Learning/AdaBoost.py", "Supervised Learning/TreeRegression.py", "Supervised Learning/LinearRegression.py" ]
[ "\"\"\"\n@Filename: AdaptiveBoost.py\n@Author: Diogo Ribeiro\n@Create Date: 2019-05-03\n@Update Date: 2019-05-03\n@Description: Implement of Adaptive Boosting\n\"\"\"\n\nimport numpy as np\nimport preProcess\nimport pickle\nimport random\nimport SVM\nimport math\n\nclass Adaboost:\n def __init__(self, norm_type=\"Normalization\", iterations=5, base_classifier=\"SVM\"):\n self.iterations = iterations\n self.norm_type = norm_type\n self.base_classifier = SVM.SVMClassifier()\n self.prediction = None\n self.probability = None\n self.classifier_set = None\n\n '''\n Function: baseClassifier\n Description: generate weak classifier\n Input: train_data dataType: ndarray description: train_data\n train_label dataType: ndarray description: train_label\n w dataType: ndarray description: weight\n Output: clf dataType: object description: weak classifier\n weighted_error dataType: float description: weighted error\n base_predictions dataType: object description: base predictions\n\n '''\n def baseClassifier(self, train_data, train_label, w):\n sample_num = len(train_data)\n error_index = np.ones([sample_num, 1])\n clf = self.base_classifier\n clf.train(train_data, train_label)\n base_predictions = np.sign(clf.predict(train_data))\n\n for i in range(sample_num):\n if base_predictions[i] == train_label[i]:\n error_index[i] = 0\n weighted_error = np.dot(w.T, error_index)\n return clf, weighted_error, base_predictions\n\n '''\n Function: updataAlpha\n Description: updata alpha\n Input: error dataType: float description: weighted error\n Output: new_alpha dataType: float description: new alpha\n '''\n def updateAlpha(self, error):\n temp = (1.0 - error)/max(error, 10e-6)\n new_alpha = 1/2 * math.log(temp, math.e)\n return new_alpha\n\n '''\n Function: train\n Description: train the model\n Input: train_data dataType: ndarray description: features\n train_label dataType: ndarray description: labels\n Output: clf_set dataType: list description: classifiers set\n '''\n def train(self, train_data, train_label):\n if self.norm_type == \"Standardization\":\n train_data = preProcess.Standardization(train_data)\n else:\n train_data = preProcess.Normalization(train_data)\n\n train_label = np.expand_dims(train_label, axis=1)\n sample_num = len(train_data)\n\n weak_classifier = []\n\n # initialize weights\n w = np.ones([sample_num, 1])\n w = w/sample_num\n\n # predictions\n agg_predicts = np.zeros([sample_num, 1]) # aggregate value of prediction\n\n # start train\n for i in range(self.iterations):\n base_clf, error, base_prediction = self.baseClassifier(train_data, train_label, w)\n alpha = self.updateAlpha(error)\n weak_classifier.append((alpha, base_clf))\n\n # update parameters in page of 139 Eq.(8.4)\n expon = np.multiply(-1 * alpha * train_label, base_prediction)\n w = np.multiply(w, np.exp(expon))\n w = w/w.sum()\n\n # calculate the total error rate\n agg_predicts += alpha*base_prediction\n error_rate = np.multiply(np.sign(agg_predicts) != train_label, np.ones([sample_num, 1]))\n error_rate = error_rate.sum()/sample_num\n\n if error_rate == 0:\n break\n self.classifier_set = weak_classifier\n return weak_classifier\n\n\n '''\n Function: predict\n Description: predict the testing set\n Input: train_data dataType: ndarray description: features\n prob dataType: bool description: return probaility of label\n Output: prediction dataType: ndarray description: the prediction results for testing set\n '''\n\n def predict(self, test_data, prob=\"False\"):\n # Normalization\n if self.norm_type == \"Standardization\":\n 
test_data = preProcess.Standardization(test_data)\n else:\n test_data = preProcess.Normalization(test_data)\n\n test_num = test_data.shape[0]\n prediction = np.zeros([test_num, 1])\n probability = np.zeros([test_num, 1])\n\n for classifier in self.classifier_set:\n alpha = classifier[0]\n clf = classifier[1]\n base_prediction = alpha * clf.predict(test_data)\n probability += base_prediction\n\n self.prediction = np.sign(probability)\n self.probability = probability\n if prob:\n return probability\n else:\n return prediction\n\n\n '''\n Function: accuracy\n Description: show detection result\n Input: test_label dataType: ndarray description: labels of test data\n Output: accuracy dataType: float description: detection accuarcy\n '''\n def accuarcy(self, test_label):\n test_label = np.expand_dims(test_label, axis=1)\n prediction = self.prediction\n accuarcy = sum(prediction == test_label)/len(test_label)\n return accuarcy\n\n\n '''\n Function: save\n Description: save the model as pkl\n Input: filename dataType: str description: the path to save model\n '''\n def save(self, filename):\n f = open(filename, 'w')\n pickle.dump(self.classifier_set, f)\n f.close()\n\n '''\n Function: load\n Description: load the model\n Input: filename dataType: str description: the path to save model\n Output: self dataType: obj description: the trained model\n '''\n def load(self, filename):\n f = open(filename)\n self.classifier_set = pickle.load(f)\n return self\n", "\"\"\"\n@ Filename: TreeRegression.py\n@ Author: Diogo Ribeiro\n@ Create Date: 2019-05-11\n@ Update Date: 2019-05-13\n@ Description: Implement TreeRegression\n\"\"\"\n\nimport numpy as np\nimport operator as op\nimport preProcess\nimport math\nimport pickle\n\nclass treeNode():\n def __init__(self, index=-1, value=None, result=None, right_tree=None, left_tree=None):\n self.index = index\n self.value = value\n self.result = result\n self.right_tree = right_tree\n self.left_tree = left_tree\n\n\nclass treeRegression:\n def __init__(self, norm_type=\"Normalization\",iterations=100, error_threshold=1, N=4):\n self.norm_type = norm_type\n self.iterations = iterations\n self.error_threshold = error_threshold # the threshold of error\n self.N = N # the least number of sample for split\n self.tree_node = None\n self.prediction = None\n self.probability = None\n\n '''\n Function: divideData\n Description: divide data into two parts\n Input: data dataType: ndarray description: feature and labels\n index dataType: int description: the column of feature\n value dataType: float description: the value of feature\n Output: left_set dataType: ndarray description: feature <= value\n right_set dataType: ndarray description: feature > value\n '''\n def divideData(self, data, index, value):\n left_set = []\n right_set = []\n # select feature in index with value\n for temp in data:\n if temp[index] >= value:\n # delete this feature\n right_set.append(temp)\n else:\n left_set.append(temp)\n return np.array(left_set), np.array(right_set)\n\n '''\n Function: getVariance\n Description: get the variance of the regression value, in page of 68 Eq.(5.19)\n Input: data dataType: ndarray description: feature and value, the last column is value\n Output: variance dataType: ndarray description: variance\n '''\n def getVariance(self, data):\n variance = np.var(data)\n return variance*len(data)\n\n '''\n Function: getMean\n Description: get the mean of the regression value,in page of 68 Eq.(5.17)\n Input: data dataType: ndarray description: feature and value, the last column is 
value\n Output: mean dataType: ndarray description: mean\n '''\n def getMean(self, data):\n mean = np.mean(data)\n return mean\n\n '''\n Function: createRegressionTree\n Description: create regression tree\n Input: data dataType: ndarray description: training set\n Output: w dataType: ndarray description: weights\n '''\n def createRegressionTree(self, data):\n # if there is no feature\n if len(data) == 0:\n self.tree_node = treeNode(result=self.getMean(data[:, -1]))\n return self.tree_node\n\n sample_num, feature_dim = np.shape(data)\n\n best_criteria = None\n best_error = np.inf\n best_set = None\n initial_error = self.getVariance(data)\n\n # get the best split feature and value\n for index in range(feature_dim - 1):\n uniques = np.unique(data[:, index])\n for value in uniques:\n left_set, right_set = self.divideData(data, index, value)\n if len(left_set) < self.N or len(right_set) < self.N:\n continue\n new_error = self.getVariance(left_set) + self.getVariance(right_set)\n if new_error < best_error:\n best_criteria = (index, value)\n best_error = new_error\n best_set = (left_set, right_set)\n\n if best_set is None:\n self.tree_node = treeNode(result=self.getMean(data[:, -1]))\n return self.tree_node\n # if the descent of error is small enough, return the mean of the data\n elif abs(initial_error - best_error) < self.error_threshold:\n self.tree_node = treeNode(result=self.getMean(data[:, -1]))\n return self.tree_node\n # if the split data is small enough, return the mean of the data\n elif len(best_set[0]) < self.N or len(best_set[1]) < self.N:\n self.tree_node = treeNode(result=self.getMean(data[:, -1]))\n return self.tree_node\n else:\n ltree = self.createRegressionTree(best_set[0])\n rtree = self.createRegressionTree(best_set[1])\n self.tree_node = treeNode(index=best_criteria[0], value=best_criteria[1], left_tree=ltree, right_tree=rtree)\n return self.tree_node\n\n '''\n Function: train\n Description: train the model\n Input: train_data dataType: ndarray description: features\n train_label dataType: ndarray description: labels\n Output: self dataType: obj description: the trained model\n '''\n def train(self, train_data, train_label, pruning=False, val_data=None, val_label=None):\n # if self.norm_type == \"Standardization\":\n # train_data = preProcess.Standardization(train_data)\n # else:\n # train_data = preProcess.Normalization(train_data)\n\n train_label = np.expand_dims(train_label, axis=1)\n data = np.hstack([train_data, train_label])\n\n self.tree_node = self.createRegressionTree(data)\n #self.printTree(self.tree_node)\n return self\n\n '''\n Function: printTree\n Description: show the structure of the decision tree\n Input: tree dataType: DecisionNode description: decision tree\n '''\n def printTree(self, tree):\n # leaf node\n if tree.result != None:\n print(str(tree.result))\n else:\n # print condition\n print(str(tree.index) + \":\" + str(tree.value))\n # print subtree\n print(\"R->\", self.printTree(tree.right_tree))\n print(\"L->\", self.printTree(tree.left_tree))\n\n '''\n Function: predict\n Description: predict the testing set\n Input: train_data dataType: ndarray description: features\n prob dataType: bool description: return probaility of label\n Output: prediction dataType: ndarray description: the prediction results for testing set\n '''\n def predict(self, test_data, prob=\"False\"):\n # Normalization\n # if self.norm_type == \"Standardization\":\n # test_data = preProcess.Standardization(test_data)\n # else:\n # test_data = 
preProcess.Normalization(test_data)\n\n test_num = test_data.shape[0]\n prediction = np.zeros([test_num, 1])\n probability = np.zeros([test_num, 1])\n for i in range(test_num):\n prediction[i] = self.classify(test_data[i, :], self.tree_node)\n # probability[i] = result[0][1]/(result[0][1] + result[1][1])\n self.prediction = prediction\n self.probability = probability\n\n return prediction\n\n '''\n Function: classify\n Description: predict the testing set\n Input: sample dataType: ndarray description: input vector to be classified\n Output: label dataType: ndarray description: the prediction results of input\n '''\n def classify(self, sample, tree):\n if tree.result is not None:\n return tree.result\n else:\n value = sample[tree.index]\n if value >= tree.value:\n branch = tree.right_tree\n else:\n branch = tree.left_tree\n return self.classify(sample, branch)\n\n '''\n Function: pruning\n Description: pruning the regression tree\n Input: test_data dataType: ndarray description: features\n test_label dataType: ndarray description: labels\n Output: self dataType: obj description: the trained model\n '''\n def pruning(self, tree, data, alpha):\n\n return 0\n\n\n\n\n\n\n\n '''\n Function: save\n Description: save the model as pkl\n Input: filename dataType: str description: the path to save model\n '''\n\n def save(self, filename):\n f = open(filename, 'w')\n pickle.dump(self.tree_node, f)\n f.close()\n\n '''\n Function: load\n Description: load the model\n Input: filename dataType: str description: the path to save model\n Output: self dataType: obj description: the trained model\n '''\n\n def load(self, filename):\n f = open(filename)\n self.tree_node = pickle.load(f)\n return self\n", "\"\"\"\n@ Filename: Regression.py\n@ Author: Diogo Ribeiro\n@ Create Date: 2019-05-05\n@ Update Date: 2019-05-06\n@ Description: Implement linear regression\n\"\"\"\nimport numpy as np\nimport preProcess\nimport pickle\nimport random\nimport matplotlib.pyplot as plt\n\nclass Regression:\n def __init__(self, norm_type=\"Normalization\",regression_type=\"Standard\", k=1.0, lamda=0.2, learning_rate=0.01, iterations=100):\n self.norm_type = norm_type\n self.regression_type = regression_type\n self.k = k # parameter for local weight linear regression\n self.lamda = lamda # parameter for ridge regression\n self.learning_rate = learning_rate # parameter for forward step regression\n self.iterations = iterations # parameter for forward step regression\n self.w = None\n self.parameters = None\n self.prediction = None\n self.probability = None\n\n '''\n Function: standardLinearRegression\n Description: standard Linear Regression, w =(X.T*X)-1*X.T*y\n Input: x dataType: ndarray description: x\n y dataType: ndarray description: y\n Output: w dataType: ndarray description: weights\n '''\n def standardLinearRegression(self, x, y):\n if self.norm_type == \"Standardization\":\n x = preProcess.Standardization(x)\n else:\n x = preProcess.Normalization(x)\n\n xTx = np.dot(x.T, x)\n if np.linalg.det(xTx) == 0: # calculate the Determinant of xTx\n print(\"Error: Singluar Matrix !\")\n return\n w = np.dot(np.linalg.inv(xTx), np.dot(x.T, y))\n return w\n\n '''\n Function: LWLinearRegression\n Description: locally weighted linear regression, w = (X.T*W*X)-1*X.T*W*y\n Input: x dataType: ndarray description: x\n y dataType: ndarray description: y\n Output: w dataType: ndarray description: weights\n '''\n def LWLinearRegression(self, x, y, sample):\n if self.norm_type == \"Standardization\":\n x = preProcess.Standardization(x)\n 
else:\n x = preProcess.Normalization(x)\n\n sample_num = len(x)\n weights = np.eye(sample_num)\n for i in range(sample_num):\n diff = sample - x[i, :]\n weights[i, i] = np.exp(np.dot(diff, diff.T)/(-2 * self.k ** 2))\n xTx = np.dot(x.T, np.dot(weights, x))\n if np.linalg.det(xTx) == 0:\n print(\"Error: Singluar Matrix !\")\n return\n result = np.dot(np.linalg.inv(xTx), np.dot(x.T, np.dot(weights, y)))\n return result\n\n '''\n Function: ridgeRegression\n Description: ridge linear regression, w = (X.T*X+ LAMDA I)-1*X.T*y\n Input: x dataType: ndarray description: x\n y dataType: ndarray description: y\n Output: w dataType: ndarray description: weights\n '''\n def ridgeRegression(self, x, y):\n if self.norm_type == \"Standardization\":\n x = preProcess.Standardization(x)\n else:\n x = preProcess.Normalization(x)\n\n feature_dim = len(x[0])\n xTx = np.dot(x.T, x)\n matrix = xTx + np.exp(feature_dim)*self.lamda\n if np.linalg.det(xTx) == 0:\n print(\"Error: Singluar Matrix !\")\n return\n w = np.dot(np.linalg.inv(matrix), np.dot(x.T, y))\n return w\n\n '''\n Function: lasso Regression\n Description: lasso linear regression,\n Input: x dataType: ndarray description: x\n y dataType: ndarray description: y\n Output: w dataType: ndarray description: weights\n '''\n def lassoRegression(self, x, y):\n if self.norm_type == \"Standardization\":\n x = preProcess.Standardization(x)\n else:\n x = preProcess.Normalization(x)\n\n sample_num, feataure_dim = np.shape(x)\n w = np.zeros([feataure_dim, 1])\n for i in range(self.iterations):\n last_w = w\n w[i] = np.dot(x[i, :], (y[i] - x[i, :] * last_w.T))/np.dot(x[i, :], x[i, :].T)\n return w\n\n\n '''\n Function: forwardstep Regression\n Description: forward step linear regression,\n Input: x dataType: ndarray description: x\n y dataType: ndarray description: y\n Output: w dataType: ndarray description: weights\n '''\n def forwardstepRegression(self, x, y):\n if self.norm_type == \"Standardization\":\n x = preProcess.Standardization(x)\n else:\n x = preProcess.Normalization(x)\n\n sample_num, feature_dim = np.shape(x)\n w = np.zeros([self.iterations, feature_dim])\n best_w = np.zeros([feature_dim, 1])\n for i in range(self.iterations):\n min_error = np.inf\n for j in range(feature_dim):\n for sign in [-1, 1]:\n temp_w = best_w\n temp_w[j] += sign * self.learning_rate\n y_hat = np.dot(x, temp_w)\n error = ((y - y_hat) ** 2).sum() # MSE\n if error < min_error: # save the best parameters\n min_error = error\n best_w = temp_w\n w[i, :] = best_w.T\n return w\n\n '''\n Function: train\n Description: train the model\n Input: train_data dataType: ndarray description: features\n train_label dataType: ndarray description: labels\n Output: self dataType: obj description: the trained model\n '''\n\n def train(self, train_data, train_label):\n if self.norm_type == \"Standardization\":\n train_data = preProcess.Standardization(train_data)\n else:\n train_data = preProcess.Normalization(train_data)\n\n if self.regression_type == \"Standard\":\n self.w = self.standardLinearRegression(train_data, train_label)\n elif self.regression_type == \"Localweight\":\n self.w = self.LWLinearRegression(train_data, train_label)\n elif self.regression_type == \"Ridge\":\n self.w = self.ridgeRegression(train_data, train_label)\n elif self.regression_type == \"Lasso\":\n self.w = self.lassoRegression(train_data, train_label)\n elif self.regression_type == \"Forwardstep\":\n self.w = self.forwardstepRegression(train_data, train_label)\n else:\n print(\"Error Regression Type!\")\n return 
self\n\n '''\n Function: predict\n Description: predict the testing set\n Input: test_data dataType: ndarray description: features\n prob dataType: bool description: return probaility of label\n Output: prediction dataType: ndarray description: the prediction results for testing set\n '''\n def predict(self, x, prob=\"False\"):\n # Normalization\n if self.norm_type == \"Standardization\":\n x = preProcess.Standardization(x)\n else:\n x = preProcess.Normalization(x)\n\n y = np.dot(x, self.w)\n self.prediction = y\n return y\n\n '''\n Function: plot\n Description: show regression result\n Input: test_label dataType: ndarray description: labels of test data\n Output: accuracy dataType: float description: detection accuarcy\n '''\n def plot(self, test_label):\n # test_label = np.expand_dims(test_label, axis=1)\n prediction = self.prediction\n plot1 = plt.plot(test_label, 'r*', label='Regression values')\n plot2 = plt.plot(prediction, 'b', label='Real values')\n plt.xlabel('X ')\n plt.ylabel('Y')\n plt.legend(loc=3)\n plt.title('Regression')\n plt.show()\n\n '''\n Function: save\n Description: save the model as pkl\n Input: filename dataType: str description: the path to save model\n '''\n\n def save(self, filename):\n f = open(filename, 'w')\n pickle.dump(self.w, f)\n f.close()\n\n '''\n Function: load\n Description: load the model\n Input: filename dataType: str description: the path to save model\n Output: self dataType: obj description: the trained model\n '''\n\n def load(self, filename):\n f = open(filename)\n self.w = pickle.load(f)\n return self\n" ]
[ [ "numpy.dot", "numpy.expand_dims", "numpy.multiply", "numpy.ones", "numpy.sign", "numpy.exp", "numpy.zeros" ], [ "numpy.hstack", "numpy.expand_dims", "numpy.unique", "numpy.shape", "numpy.mean", "numpy.var", "numpy.array", "numpy.zeros" ], [ "numpy.dot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.linalg.inv", "numpy.eye", "matplotlib.pyplot.plot", "numpy.linalg.det", "numpy.shape", "numpy.exp", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Sebastianvarv/rl-homework
[ "b7526ac3c86cbaae6b796856c31fc4c671a32663" ]
[ "hw1/run_expert.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nCode to load an expert policy and generate roll-out data for behavioral cloning.\nExample usage:\n python run_expert.py experts/Humanoid-v1.pkl Humanoid-v1 --render \\\n --num_rollouts 20\n\nAuthor of this script and included expert policies: Jonathan Ho ([email protected])\n\"\"\"\n\nimport os\nimport pickle\nimport tensorflow as tf\nimport numpy as np\nimport tf_util\nimport gym\nimport load_policy\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('expert_policy_file', type=str)\n parser.add_argument('envname', type=str)\n parser.add_argument('--render', action='store_true')\n parser.add_argument(\"--max_timesteps\", type=int)\n parser.add_argument('--num_rollouts', type=int, default=20,\n help='Number of expert roll outs')\n args = parser.parse_args()\n\n print('loading and building expert policy')\n policy_fn = load_policy.load_policy(args.expert_policy_file)\n print('loaded and built')\n\n with tf.Session():\n tf_util.initialize()\n\n import gym\n env = gym.make(args.envname)\n max_steps = args.max_timesteps or env.spec.timestep_limit\n\n returns = []\n observations = []\n actions = []\n for i in range(args.num_rollouts):\n print('iter', i)\n obs = env.reset()\n done = False\n totalr = 0.\n steps = 0\n while not done:\n action = policy_fn(obs[None,:])\n observations.append(obs)\n actions.append(action)\n obs, r, done, _ = env.step(action)\n totalr += r\n steps += 1\n if args.render:\n env.render()\n if steps % 100 == 0: print(\"%i/%i\"%(steps, max_steps))\n if steps >= max_steps:\n break\n returns.append(totalr)\n\n print('returns', returns)\n print('mean return', np.mean(returns))\n print('std of return', np.std(returns))\n\n expert_data = {'observations': np.array(observations),\n 'actions': np.array(actions)}\n #\n # with open(os.path.join('expert_data', args.envname + '.pkl'), 'wb') as f:\n # pickle.dump(expert_data, f\n # , pickle.HIGHEST_PROTOCOL)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.std", "numpy.array", "numpy.mean", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
mmiller-max/clearml
[ "fd2d6c6f5d46cad3e406e88eeb4d805455b5b3d8" ]
[ "clearml/storage/helper.py" ]
[ "from __future__ import with_statement\n\nimport errno\nimport getpass\nimport itertools\nimport json\nimport os\nimport shutil\nimport sys\nimport threading\nfrom abc import ABCMeta, abstractmethod\nfrom collections import namedtuple\nfrom concurrent.futures import ThreadPoolExecutor\nfrom copy import copy\nfrom datetime import datetime\nfrom multiprocessing.pool import ThreadPool\nfrom tempfile import mktemp\nfrom time import time\nfrom types import GeneratorType\n\nimport requests\nimport six\nfrom _socket import gethostname\nfrom attr import attrs, attrib, asdict\nfrom furl import furl\nfrom pathlib2 import Path\nfrom requests.exceptions import ConnectionError\nfrom six import binary_type, StringIO\nfrom six.moves.queue import Queue, Empty\nfrom six.moves.urllib.parse import urlparse\nfrom six.moves.urllib.request import url2pathname\n\nfrom .callbacks import UploadProgressReport, DownloadProgressReport\nfrom .util import quote_url\nfrom ..backend_api.utils import get_http_session_with_retry\nfrom ..backend_config.bucket_config import S3BucketConfigurations, GSBucketConfigurations, AzureContainerConfigurations\nfrom ..config import config, deferred_config\nfrom ..debugging import get_logger\nfrom ..errors import UsageError\n\n\nclass StorageError(Exception):\n pass\n\n\nclass DownloadError(Exception):\n pass\n\n\[email protected]_metaclass(ABCMeta)\nclass _Driver(object):\n\n @classmethod\n def get_logger(cls):\n return get_logger('storage')\n\n @abstractmethod\n def get_container(self, container_name, config=None, **kwargs):\n pass\n\n @abstractmethod\n def test_upload(self, test_path, config, **kwargs):\n pass\n\n @abstractmethod\n def upload_object_via_stream(self, iterator, container, object_name, extra, **kwargs):\n pass\n\n @abstractmethod\n def list_container_objects(self, container, ex_prefix, **kwargs):\n pass\n\n @abstractmethod\n def get_direct_access(self, remote_path, **kwargs):\n pass\n\n @abstractmethod\n def download_object(self, obj, local_path, overwrite_existing, delete_on_failure, callback, **kwargs):\n pass\n\n @abstractmethod\n def download_object_as_stream(self, obj, chunk_size, **kwargs):\n pass\n\n @abstractmethod\n def delete_object(self, obj, **kwargs):\n pass\n\n @abstractmethod\n def upload_object(self, file_path, container, object_name, extra, **kwargs):\n pass\n\n @abstractmethod\n def get_object(self, container_name, object_name, **kwargs):\n pass\n\n\nclass StorageHelper(object):\n \"\"\" Storage helper.\n Used by the entire system to download/upload files.\n Supports both local and remote files (currently local files, network-mapped files, HTTP/S and Amazon S3)\n \"\"\"\n _temp_download_suffix = '.partially'\n\n @classmethod\n def _get_logger(cls):\n return get_logger('storage')\n\n @attrs\n class _PathSubstitutionRule(object):\n registered_prefix = attrib(type=str)\n local_prefix = attrib(type=str)\n replace_windows_sep = attrib(type=bool)\n replace_linux_sep = attrib(type=bool)\n\n path_substitution_config = 'storage.path_substitution'\n\n @classmethod\n def load_list_from_config(cls):\n rules_list = []\n for index, sub_config in enumerate(config.get(cls.path_substitution_config, list())):\n rule = cls(\n registered_prefix=sub_config.get('registered_prefix', None),\n local_prefix=sub_config.get('local_prefix', None),\n replace_windows_sep=sub_config.get('replace_windows_sep', False),\n replace_linux_sep=sub_config.get('replace_linux_sep', False),\n )\n\n if any(prefix is None for prefix in (rule.registered_prefix, rule.local_prefix)):\n 
StorageHelper._get_logger().warning(\n \"Illegal substitution rule configuration '{}[{}]': {}\".format(\n cls.path_substitution_config,\n index,\n asdict(rule),\n ))\n\n continue\n\n if all((rule.replace_windows_sep, rule.replace_linux_sep)):\n StorageHelper._get_logger().warning(\n \"Only one of replace_windows_sep and replace_linux_sep flags may be set.\"\n \"'{}[{}]': {}\".format(\n cls.path_substitution_config,\n index,\n asdict(rule),\n ))\n continue\n\n rules_list.append(rule)\n\n return rules_list\n\n class _UploadData(object):\n @property\n def src_path(self):\n return self._src_path\n\n @property\n def dest_path(self):\n return self._dest_path\n\n @property\n def extra(self):\n return self._extra\n\n @property\n def callback(self):\n return self._callback\n\n @property\n def retries(self):\n return self._retries\n\n def __init__(self, src_path, dest_path, extra, callback, retries):\n self._src_path = src_path\n self._dest_path = dest_path\n self._extra = extra\n self._callback = callback\n self._retries = retries\n\n def __str__(self):\n return \"src=%s\" % self.src_path\n\n _helpers = {} # cache of helper instances\n\n # global terminate event for async upload threads\n _terminate = threading.Event()\n _async_upload_threads = set()\n _upload_pool = None\n\n # collect all bucket credentials that aren't empty (ignore entries with an empty key or secret)\n _s3_configurations = deferred_config('aws.s3', {}, transform=S3BucketConfigurations.from_config)\n _gs_configurations = deferred_config('google.storage', {}, transform=GSBucketConfigurations.from_config)\n _azure_configurations = deferred_config('azure.storage', {}, transform=AzureContainerConfigurations.from_config)\n _path_substitutions = deferred_config(transform=_PathSubstitutionRule.load_list_from_config)\n\n @property\n def log(self):\n return self._log\n\n @property\n def scheme(self):\n return self._scheme\n\n @property\n def secure(self):\n return self._secure\n\n @property\n def base_url(self):\n return self._base_url\n\n @classmethod\n def get(cls, url, logger=None, **kwargs):\n \"\"\"\n Get a storage helper instance for the given URL\n\n :return: A StorageHelper instance.\n \"\"\"\n\n # Handle URL substitution etc before locating the correct storage driver\n url = cls._canonize_url(url)\n\n # Get the credentials we should use for this url\n base_url = cls._resolve_base_url(url)\n\n instance_key = '%s_%s' % (base_url, threading.current_thread().ident or 0)\n\n force_create = kwargs.pop('__force_create', False)\n if (instance_key in cls._helpers) and (not force_create):\n return cls._helpers[instance_key]\n\n # Don't canonize URL since we already did it\n try:\n instance = cls(base_url=base_url, url=url, logger=logger, canonize_url=False, **kwargs)\n except (StorageError, UsageError) as ex:\n cls._get_logger().error(str(ex))\n return None\n except Exception as ex:\n cls._get_logger().error(\"Failed creating storage object {} Reason: {}\".format(\n base_url or url, ex))\n return None\n\n cls._helpers[instance_key] = instance\n return instance\n\n @classmethod\n def get_local_copy(cls, remote_url):\n \"\"\"\n Download a file from remote URL to a local storage, and return path to local copy,\n\n :param remote_url: Remote URL. Example: https://example.com/file.jpg s3://bucket/folder/file.mp4 etc.\n :return: Path to local copy of the downloaded file. None if error occurred.\n \"\"\"\n helper = cls.get(remote_url)\n if not helper:\n return None\n # create temp file with the requested file name\n file_name = '.' 
+ remote_url.split('/')[-1].split(os.path.sep)[-1]\n local_path = mktemp(suffix=file_name)\n return helper.download_to_file(remote_url, local_path)\n\n def __init__(self, base_url, url, key=None, secret=None, region=None, verbose=False, logger=None, retries=5,\n **kwargs):\n level = config.get('storage.log.level', None)\n\n if level:\n try:\n self._get_logger().setLevel(level)\n except (TypeError, ValueError):\n self._get_logger().error('invalid storage log level in configuration: %s' % level)\n\n self._log = logger or self._get_logger()\n self._verbose = verbose\n self._retries = retries\n self._extra = {}\n self._base_url = base_url\n self._secure = True\n self._driver = None\n self._container = None\n self._conf = None\n\n if kwargs.get('canonize_url', True):\n url = self._canonize_url(url)\n\n parsed = urlparse(url)\n self._scheme = parsed.scheme\n\n if self._scheme == _AzureBlobServiceStorageDriver.scheme:\n self._conf = copy(self._azure_configurations.get_config_by_uri(url))\n if self._conf is None:\n raise StorageError(\"Missing Azure Blob Storage configuration for {}\".format(url))\n\n if not self._conf.account_name or not self._conf.account_key:\n raise StorageError(\n \"Missing account name or key for Azure Blob Storage access for {}\".format(base_url)\n )\n\n self._driver = _AzureBlobServiceStorageDriver()\n self._container = self._driver.get_container(config=self._conf)\n\n elif self._scheme == _Boto3Driver.scheme:\n self._conf = copy(self._s3_configurations.get_config_by_uri(url))\n self._secure = self._conf.secure\n\n final_region = region if region else self._conf.region\n if not final_region:\n final_region = None\n\n self._conf.update(\n key=key or self._conf.key,\n secret=secret or self._conf.secret,\n multipart=self._conf.multipart,\n region=final_region,\n use_credentials_chain=self._conf.use_credentials_chain\n )\n\n if not self._conf.use_credentials_chain:\n if not self._conf.key or not self._conf.secret:\n raise ValueError(\n \"Missing key and secret for S3 storage access (%s)\" % base_url\n )\n\n self._driver = _Boto3Driver()\n self._container = self._driver.get_container(container_name=self._base_url, retries=retries,\n config=self._conf)\n\n elif self._scheme == _GoogleCloudStorageDriver.scheme:\n self._conf = copy(self._gs_configurations.get_config_by_uri(url))\n self._driver = _GoogleCloudStorageDriver()\n self._container = self._driver.get_container(\n container_name=self._base_url,\n config=self._conf\n )\n\n elif self._scheme in _HttpDriver.schemes:\n self._driver = _HttpDriver(retries=retries)\n self._container = self._driver.get_container(container_name=self._base_url)\n else: # elif self._scheme == 'file':\n # if this is not a known scheme assume local file\n\n # If the scheme is file, use only the path segment, If not, use the entire URL\n if self._scheme == 'file':\n url = parsed.path\n\n url = url.replace(\"\\\\\", \"/\")\n\n # url2pathname is specifically intended to operate on (urlparse result).path\n # and returns a cross-platform compatible result\n driver_uri = url2pathname(url)\n path_driver_uri = Path(driver_uri)\n # if path_driver_uri.is_file():\n # driver_uri = str(path_driver_uri.parent)\n # elif not path_driver_uri.exists():\n # # assume a folder and create\n # # Path(driver_uri).mkdir(parents=True, exist_ok=True)\n # pass\n\n self._driver = _FileStorageDriver(str(path_driver_uri.root))\n self._container = None\n\n @classmethod\n def terminate_uploads(cls, force=True, timeout=2.0):\n if force:\n # since async uploaders are daemon threads, we 
can just return and let them close by themselves\n return\n # signal all threads to terminate and give them a chance for 'timeout' seconds (total, not per-thread)\n cls._terminate.set()\n remaining_timeout = timeout\n for thread in cls._async_upload_threads:\n t = time()\n try:\n thread.join(timeout=remaining_timeout)\n except Exception:\n pass\n remaining_timeout -= (time() - t)\n\n @classmethod\n def get_configuration(cls, bucket_config):\n return cls._s3_configurations.get_config_by_bucket(bucket_config.bucket, bucket_config.host)\n\n @classmethod\n def add_configuration(cls, bucket_config, log=None, _test_config=True):\n # Try to use existing configuration if we have no key and secret\n use_existing = not bucket_config.is_valid()\n\n # Get existing config anyway (we'll either try to use it or alert we're replacing it\n existing = cls.get_configuration(bucket_config)\n\n configs = cls._s3_configurations\n\n if not use_existing:\n # Test bucket config, fails if unsuccessful\n if _test_config:\n _Boto3Driver._test_bucket_config(bucket_config, log)\n\n if existing:\n if log:\n log.warning('Overriding existing configuration for %s/%s'\n % (existing.host or 'AWS', existing.bucket))\n configs.remove_config(existing)\n else:\n # Try to use existing configuration\n good_config = False\n if existing:\n if log:\n log.info('Using existing credentials for bucket %s/%s'\n % (bucket_config.host or 'AWS', bucket_config.bucket))\n good_config = _Boto3Driver._test_bucket_config(existing, log, raise_on_error=False)\n\n if not good_config:\n # Try to use global key/secret\n configs.update_config_with_defaults(bucket_config)\n\n if log:\n log.info('Using global credentials for bucket %s/%s'\n % (bucket_config.host or 'AWS', bucket_config.bucket))\n if _test_config:\n _Boto3Driver._test_bucket_config(bucket_config, log)\n else:\n # do not add anything, existing config is OK\n return\n\n configs.add_config(bucket_config)\n\n @classmethod\n def add_path_substitution(\n cls,\n registered_prefix,\n local_prefix,\n replace_windows_sep=False,\n replace_linux_sep=False,\n ):\n \"\"\"\n Add a path substitution rule for storage paths.\n\n Useful for case where the data was registered under some path, and that\n path was later renamed. This may happen with local storage paths where\n each machine is has different mounts or network drives configurations\n\n :param registered_prefix: The prefix to search for and replace. This is\n the prefix of the path the data is registered under. This should be the\n exact url prefix, case sensitive, as the data is registered.\n :param local_prefix: The prefix to replace 'registered_prefix' with. This\n is the prefix of the path the data is actually saved under. 
This should be the\n exact url prefix, case sensitive, as the data is saved under.\n :param replace_windows_sep: If set to True, and the prefix matches, the rest\n of the url has all of the windows path separators (backslash '\\') replaced with\n the native os path separator.\n :param replace_linux_sep: If set to True, and the prefix matches, the rest\n of the url has all of the linux/unix path separators (slash '/') replaced with\n the native os path separator.\n \"\"\"\n\n if not registered_prefix or not local_prefix:\n raise UsageError(\"Path substitution prefixes must be non empty strings\")\n\n if replace_windows_sep and replace_linux_sep:\n raise UsageError(\"Only one of replace_windows_sep and replace_linux_sep may be set.\")\n\n rule = cls._PathSubstitutionRule(\n registered_prefix=registered_prefix,\n local_prefix=local_prefix,\n replace_windows_sep=replace_windows_sep,\n replace_linux_sep=replace_linux_sep,\n )\n\n cls._path_substitutions.append(rule)\n\n @classmethod\n def clear_path_substitutions(cls):\n \"\"\"\n Removes all path substitution rules, including ones from the configuration file.\n \"\"\"\n cls._path_substitutions = list()\n\n def verify_upload(self, folder_uri='', raise_on_error=True, log_on_error=True):\n \"\"\"\n Verify that this helper can upload files to a folder.\n\n An upload is possible iff:\n 1. the destination folder is under the base uri of the url used to create the helper\n 2. the helper has credentials to write to the destination folder\n\n :param folder_uri: The destination folder to test. Must be an absolute\n url that begins with the base uri of the url used to create the helper.\n :param raise_on_error: Raise an exception if an upload is not possible\n :param log_on_error: Log an error if an upload is not possible\n :return: True, if, and only if, an upload to folder_uri is possible.\n \"\"\"\n\n folder_uri = self._canonize_url(folder_uri)\n\n folder_uri = self.conform_url(folder_uri, self._base_url)\n\n test_path = self._normalize_object_name(folder_uri)\n\n if self._scheme == _Boto3Driver.scheme:\n _Boto3Driver._test_bucket_config(\n self._conf,\n self._log,\n test_path=test_path,\n raise_on_error=raise_on_error,\n log_on_error=log_on_error,\n )\n elif self._scheme == _GoogleCloudStorageDriver.scheme:\n self._driver.test_upload(test_path, self._conf)\n\n elif self._scheme == 'file':\n # Check path exists\n Path(test_path).mkdir(parents=True, exist_ok=True)\n # check path permissions\n Path(test_path).touch(exist_ok=True)\n\n return folder_uri\n\n def upload_from_stream(self, stream, dest_path, extra=None, retries=1):\n dest_path = self._canonize_url(dest_path)\n object_name = self._normalize_object_name(dest_path)\n extra = extra.copy() if extra else {}\n extra.update(self._extra)\n last_ex = None\n cb = UploadProgressReport.from_stream(stream, object_name, self._verbose, self._log)\n for i in range(max(1, retries)):\n try:\n self._driver.upload_object_via_stream(\n iterator=stream,\n container=self._container,\n object_name=object_name,\n callback=cb,\n extra=extra)\n last_ex = None\n break\n except Exception as ex:\n last_ex = ex\n # seek to beginning if possible\n # noinspection PyBroadException\n try:\n stream.seek(0)\n except Exception:\n pass\n if last_ex:\n raise last_ex\n\n if self.scheme in _HttpDriver.schemes:\n # quote link\n dest_path = quote_url(dest_path)\n\n return dest_path\n\n def upload(self, src_path, dest_path=None, extra=None, async_enable=False, cb=None, retries=1):\n if not dest_path:\n dest_path = 
os.path.basename(src_path)\n\n dest_path = self._canonize_url(dest_path)\n\n if cb and self.scheme in _HttpDriver.schemes:\n # store original callback\n a_cb = cb\n\n # quote link\n def callback(a_path):\n return a_cb(quote_url(a_path) if a_path else a_path)\n # replace callback with wrapper\n cb = callback\n\n if async_enable:\n data = self._UploadData(src_path=src_path, dest_path=dest_path, extra=extra, callback=cb, retries=retries)\n StorageHelper._initialize_upload_pool()\n return StorageHelper._upload_pool.apply_async(self._do_async_upload, args=(data,))\n else:\n res = self._do_upload(src_path, dest_path, extra, cb, verbose=False, retries=retries)\n if res:\n res = quote_url(res)\n return res\n\n def list(self, prefix=None):\n \"\"\"\n List entries in the helper base path.\n\n Return a list of names inside this helper base path. The base path is\n determined at creation time and is specific for each storage medium.\n For Google Storage and S3 it is the bucket of the path.\n For local files it is the root directory.\n\n This operation is not supported for http and https protocols.\n\n :param prefix: If None, return the list as described above. If not, it\n must be a string - the path of a sub directory under the base path.\n the returned list will include only objects under that subdir.\n\n :return: The paths of all the objects in the storage base\n path under prefix. Listed relative to the base path.\n\n \"\"\"\n\n if prefix:\n if prefix.startswith(self._base_url):\n prefix = prefix[len(self.base_url):].lstrip(\"/\")\n\n try:\n res = self._driver.list_container_objects(self._container, ex_prefix=prefix)\n except TypeError:\n res = self._driver.list_container_objects(self._container)\n\n return [\n obj.name\n for obj in res if\n obj.name.startswith(prefix) and obj.name != prefix\n ]\n else:\n return [obj.name for obj in self._driver.list_container_objects(self._container)]\n\n def download_to_file(self, remote_path, local_path, overwrite_existing=False, delete_on_failure=True, verbose=None):\n def next_chunk(astream):\n if isinstance(astream, binary_type):\n chunk = astream\n astream = None\n elif astream:\n try:\n chunk = next(astream)\n except StopIteration:\n chunk = None\n else:\n chunk = None\n return chunk, astream\n\n remote_path = self._canonize_url(remote_path)\n verbose = self._verbose if verbose is None else verbose\n\n # Check if driver type supports direct access:\n direct_access_path = self._driver.get_direct_access(remote_path)\n if direct_access_path:\n return direct_access_path\n\n temp_local_path = None\n try:\n if verbose:\n self._log.info('Start downloading from %s' % remote_path)\n if not overwrite_existing and Path(local_path).is_file():\n self._log.warning(\n 'File {} already exists, no need to download, thread id = {}'.format(\n local_path,\n threading.current_thread().ident,\n ),\n )\n\n return local_path\n # we download into temp_local_path so that if we accidentally stop in the middle,\n # we won't think we have the entire file\n temp_local_path = '{}_{}{}'.format(local_path, time(), self._temp_download_suffix)\n obj = self._get_object(remote_path)\n if not obj:\n return None\n\n # object size in bytes\n total_size_mb = -1\n dl_total_mb = 0.\n download_reported = False\n # chunks size is ignored and always 5Mb\n chunk_size_mb = 5\n\n # make sure we have the destination folder\n # noinspection PyBroadException\n Path(temp_local_path).parent.mkdir(parents=True, exist_ok=True)\n\n # try to get file size\n try:\n if isinstance(self._driver, _HttpDriver) and 
obj:\n obj = self._driver._get_download_object(obj)\n total_size_mb = float(obj.headers.get('Content-Length', 0)) / (1024 * 1024)\n elif hasattr(obj, 'size'):\n size = obj.size\n # Google storage has the option to reload the object to get the size\n if size is None and hasattr(obj, 'reload'):\n obj.reload()\n size = obj.size\n\n total_size_mb = 0 if size is None else float(size) / (1024 * 1024)\n elif hasattr(obj, 'content_length'):\n total_size_mb = float(obj.content_length) / (1024 * 1024)\n except (ValueError, AttributeError, KeyError):\n pass\n\n # if driver supports download with callback, use it (it might be faster)\n if hasattr(self._driver, 'download_object'):\n # callback\n cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self._log)\n self._driver.download_object(obj, temp_local_path, callback=cb)\n download_reported = bool(cb.last_reported)\n dl_total_mb = cb.current_status_mb\n else:\n stream = self._driver.download_object_as_stream(obj, chunk_size_mb * 1024 * 1024)\n if stream is None:\n raise ValueError('Could not download %s' % remote_path)\n with open(temp_local_path, 'wb') as fd:\n data, stream = next_chunk(stream)\n while data:\n fd.write(data)\n data, stream = next_chunk(stream)\n\n if Path(temp_local_path).stat().st_size <= 0:\n raise Exception('downloaded a 0-sized file')\n\n # if we are on windows, we need to remove the target file before renaming\n # otherwise posix rename will overwrite the target\n if os.name != 'posix':\n try:\n os.remove(local_path)\n except Exception:\n pass\n\n # rename temp file to local_file\n # noinspection PyBroadException\n try:\n os.rename(temp_local_path, local_path)\n except Exception:\n # noinspection PyBroadException\n try:\n os.unlink(temp_local_path)\n except Exception:\n pass\n # file was downloaded by a parallel process, check we have the final output and delete the partial copy\n path_local_path = Path(local_path)\n if not path_local_path.is_file() or path_local_path.stat().st_size <= 0:\n raise Exception('Failed renaming partial file, downloaded file exists and a 0-sized file')\n\n # report download if we are on the second chunk\n if verbose or download_reported:\n self._log.info(\n 'Downloaded %.2f MB successfully from %s , saved to %s' % (dl_total_mb, remote_path, local_path))\n return local_path\n except DownloadError:\n raise\n except Exception as e:\n self._log.error(\"Could not download {} , err: {} \".format(remote_path, e))\n if delete_on_failure:\n # noinspection PyBroadException\n try:\n if temp_local_path:\n os.remove(temp_local_path)\n except Exception:\n pass\n return None\n\n def download_as_stream(self, remote_path, chunk_size=None):\n remote_path = self._canonize_url(remote_path)\n try:\n obj = self._get_object(remote_path)\n return self._driver.download_object_as_stream(\n obj, chunk_size=chunk_size, verbose=self._verbose, log=self.log\n )\n except DownloadError:\n raise\n except Exception as e:\n self._log.error(\"Could not download file : %s, err:%s \" % (remote_path, str(e)))\n return None\n\n def download_as_nparray(self, remote_path, chunk_size=None):\n try:\n stream = self.download_as_stream(remote_path, chunk_size)\n if stream is None:\n return\n\n # TODO: ugly py3 hack, please remove ASAP\n if six.PY3 and not isinstance(stream, GeneratorType):\n import numpy as np\n return np.frombuffer(stream, dtype=np.uint8)\n else:\n import numpy as np\n return np.asarray(bytearray(b''.join(stream)), dtype=np.uint8)\n\n except Exception as e:\n self._log.error(\"Could not download file : %s, err:%s \" % 
(remote_path, str(e)))\n\n def delete(self, path):\n return self._driver.delete_object(self._get_object(path))\n\n def check_write_permissions(self, dest_path=None):\n # create a temporary file, then delete it\n base_url = dest_path or self._base_url\n dest_path = base_url + '/.clearml.test'\n # do not check http/s connection permissions\n if dest_path.startswith('http'):\n return True\n try:\n self.upload_from_stream(stream=six.BytesIO(b'clearml'), dest_path=dest_path)\n self.delete(path=dest_path)\n except Exception:\n raise ValueError('Insufficient permissions for {}'.format(base_url))\n return True\n\n @classmethod\n def download_from_url(cls, remote_url, local_path, overwrite_existing=False):\n \"\"\"\n Download a file from remote URL to a local storage\n\n :param remote_url: Remote URL. Example: https://example.com/image.jpg or s3://bucket/folder/file.mp4 etc.\n :param local_path: target location for downloaded file. Example: /tmp/image.jpg\n :param overwrite_existing: If True and local_path exists, it will overwrite it, otherwise print warning\n :return: local_path if download was successful.\n \"\"\"\n helper = cls.get(remote_url)\n if not helper:\n return None\n return helper.download_to_file(remote_url, local_path, overwrite_existing=overwrite_existing)\n\n @classmethod\n def _canonize_url(cls, url):\n return cls._apply_url_substitutions(url)\n\n @classmethod\n def _apply_url_substitutions(cls, url):\n def replace_separator(_url, where, sep):\n return _url[:where] + _url[where:].replace(sep, os.sep)\n\n for index, rule in enumerate(cls._path_substitutions):\n if url.startswith(rule.registered_prefix):\n url = url.replace(\n rule.registered_prefix,\n rule.local_prefix,\n 1, # count. str.replace() does not support keyword arguments\n )\n\n if rule.replace_windows_sep:\n url = replace_separator(url, len(rule.local_prefix), '\\\\')\n\n if rule.replace_linux_sep:\n url = replace_separator(url, len(rule.local_prefix), '/')\n\n break\n\n return url\n\n @classmethod\n def _resolve_base_url(cls, base_url):\n parsed = urlparse(base_url)\n if parsed.scheme == _Boto3Driver.scheme:\n conf = cls._s3_configurations.get_config_by_uri(base_url)\n bucket = conf.bucket\n if not bucket:\n parts = Path(parsed.path.strip('/')).parts\n if parts:\n bucket = parts[0]\n return '/'.join(x for x in ('s3:/', conf.host, bucket) if x)\n elif parsed.scheme == _AzureBlobServiceStorageDriver.scheme:\n conf = cls._azure_configurations.get_config_by_uri(base_url)\n if not conf:\n raise StorageError(\"Can't find azure configuration for {}\".format(base_url))\n return str(furl(base_url).set(path=conf.container_name))\n elif parsed.scheme == _GoogleCloudStorageDriver.scheme:\n conf = cls._gs_configurations.get_config_by_uri(base_url)\n return str(furl(scheme=parsed.scheme, netloc=conf.bucket))\n elif parsed.scheme == 'http':\n return 'http://'\n elif parsed.scheme == 'https':\n return 'https://'\n else: # if parsed.scheme == 'file':\n # if we do not know what it is, we assume file\n return 'file://'\n\n @classmethod\n def conform_url(cls, folder_uri, base_url=None):\n if not folder_uri:\n return folder_uri\n _base_url = cls._resolve_base_url(folder_uri) if not base_url else base_url\n\n if not folder_uri.startswith(_base_url):\n prev_folder_uri = folder_uri\n if _base_url == 'file://':\n folder_uri = str(Path(folder_uri).absolute())\n if folder_uri.startswith('/'):\n folder_uri = _base_url + folder_uri\n else:\n folder_uri = '/'.join((_base_url, folder_uri))\n\n cls._get_logger().debug('Upload destination {} amended 
to {} for registration purposes'.format(\n prev_folder_uri, folder_uri))\n else:\n raise ValueError('folder_uri: {} does not start with base url: {}'.format(folder_uri, _base_url))\n\n return folder_uri\n\n def _absolute_object_name(self, path):\n \"\"\" Returns absolute remote path, including any prefix that is handled by the container \"\"\"\n if not path.startswith(self.base_url):\n return self.base_url.rstrip('/') + '///' + path.lstrip('/')\n return path\n\n def _normalize_object_name(self, path):\n \"\"\" Normalize remote path. Remove any prefix that is already handled by the container \"\"\"\n if path.startswith(self.base_url):\n path = path[len(self.base_url):]\n if path.startswith('/') and os.name == 'nt':\n path = path[1:]\n if self.scheme in (_Boto3Driver.scheme, _GoogleCloudStorageDriver.scheme,\n _AzureBlobServiceStorageDriver.scheme):\n path = path.lstrip('/')\n return path\n\n def _do_async_upload(self, data):\n assert isinstance(data, self._UploadData)\n return self._do_upload(data.src_path, data.dest_path, extra=data.extra, cb=data.callback,\n verbose=True, retries=data.retries)\n\n def _upload_from_file(self, local_path, dest_path, extra=None):\n if not hasattr(self._driver, 'upload_object'):\n with open(local_path, 'rb') as stream:\n res = self.upload_from_stream(stream=stream, dest_path=dest_path, extra=extra)\n else:\n object_name = self._normalize_object_name(dest_path)\n extra = extra.copy() if extra else {}\n extra.update(self._extra)\n cb = UploadProgressReport.from_file(local_path, self._verbose, self._log)\n res = self._driver.upload_object(\n file_path=local_path,\n container=self._container,\n object_name=object_name,\n callback=cb,\n extra=extra)\n return res\n\n def _do_upload(self, src_path, dest_path, extra=None, cb=None, verbose=False, retries=1):\n object_name = self._normalize_object_name(dest_path)\n if cb:\n try:\n cb(None)\n except Exception as e:\n self._log.error(\"Calling upload callback when starting upload: %s\" % str(e))\n if verbose:\n msg = 'Starting upload: {} => {}{}'.format(\n src_path,\n (self._container.name if self._container.name.endswith('/') else self._container.name + '/')\n if self._container and self._container.name else '', object_name)\n if object_name.startswith('file://') or object_name.startswith('/'):\n self._log.debug(msg)\n else:\n self._log.info(msg)\n last_ex = None\n for i in range(max(1, retries)):\n try:\n if not self._upload_from_file(local_path=src_path, dest_path=dest_path, extra=extra):\n # retry if failed\n last_ex = ValueError(\"Upload failed\")\n continue\n last_ex = None\n break\n except Exception as e:\n last_ex = e\n\n if last_ex:\n self._log.error(\"Exception encountered while uploading %s\" % str(last_ex))\n if cb:\n try:\n cb(False)\n except Exception as e:\n self._log.warning(\"Exception on upload callback: %s\" % str(e))\n raise last_ex\n\n if verbose:\n self._log.debug(\"Finished upload: %s => %s\" % (src_path, object_name))\n if cb:\n try:\n cb(dest_path)\n except Exception as e:\n self._log.warning(\"Exception on upload callback: %s\" % str(e))\n\n return dest_path\n\n def _get_object(self, path):\n object_name = self._normalize_object_name(path)\n try:\n return self._driver.get_object(\n container_name=self._container.name if self._container else '', object_name=object_name)\n except ConnectionError:\n raise DownloadError\n except Exception as e:\n self.log.warning('Storage helper problem for {}: {}'.format(str(object_name), str(e)))\n return None\n\n @staticmethod\n def _initialize_upload_pool():\n 
if not StorageHelper._upload_pool:\n StorageHelper._upload_pool = ThreadPool(processes=1)\n\n @staticmethod\n def close_async_threads():\n if StorageHelper._upload_pool:\n pool = StorageHelper._upload_pool\n StorageHelper._upload_pool = None\n # noinspection PyBroadException\n try:\n pool.terminate()\n pool.join()\n except Exception:\n pass\n\n\nclass _HttpDriver(_Driver):\n \"\"\" LibCloud http/https adapter (simple, enough for now) \"\"\"\n\n timeout = (5.0, 30.)\n min_kbps_speed = 50\n\n schemes = ('http', 'https')\n\n class _Container(object):\n _default_backend_session = None\n _default_files_server_host = None\n\n def __init__(self, name, retries=5, **kwargs):\n self.name = name\n self.session = get_http_session_with_retry(total=retries, connect=retries, read=retries, redirect=retries)\n\n def get_headers(self, url):\n if not self._default_backend_session:\n from ..backend_interface.base import InterfaceBase\n self._default_backend_session = InterfaceBase._get_default_session()\n if self._default_files_server_host is None:\n self._default_files_server_host = self._default_backend_session.get_files_server_host().rstrip('/')\n\n if url == self._default_files_server_host or url.startswith(self._default_files_server_host + '/'):\n return self._default_backend_session.add_auth_headers({})\n return None\n\n class _HttpSessionHandle(object):\n def __init__(self, url, is_stream, container_name, object_name):\n self.url, self.is_stream, self.container_name, self.object_name = \\\n url, is_stream, container_name, object_name\n\n def __init__(self, retries=5):\n self._retries = retries\n self._containers = {}\n\n def get_container(self, container_name, config=None, **kwargs):\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, retries=self._retries, **kwargs)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, extra=None, callback=None, **kwargs):\n url = object_name[:object_name.index('/')]\n url_path = object_name[len(url) + 1:]\n full_url = container.name + url\n # when sending data in post, there is no connection timeout, just an entire upload timeout\n timeout = self.timeout[-1]\n stream_size = 0\n if hasattr(iterator, 'tell') and hasattr(iterator, 'seek'):\n pos = iterator.tell()\n iterator.seek(0, 2)\n stream_size = iterator.tell() - pos\n iterator.seek(pos, 0)\n timeout = max(timeout, (stream_size / 1024) / float(self.min_kbps_speed))\n\n res = container.session.post(full_url, files={url_path: iterator}, timeout=timeout,\n headers=container.get_headers(full_url))\n if res.status_code != requests.codes.ok:\n raise ValueError('Failed uploading object %s (%d): %s' % (object_name, res.status_code, res.text))\n\n # call back is useless because we are not calling it while uploading...\n\n # if callback and stream_size:\n # try:\n # callback(stream_size)\n # except Exception as ex:\n # log.debug('Exception raised when running callback function: %s' % ex)\n return res\n\n def list_container_objects(self, *args, **kwargs):\n raise NotImplementedError('List is not implemented for http protocol')\n\n def delete_object(self, obj, *args, **kwargs):\n assert isinstance(obj, self._HttpSessionHandle)\n container = self._containers[obj.container_name]\n res = container.session.delete(obj.url, headers=container.get_headers(obj.url))\n if res.status_code != requests.codes.ok:\n self._get_logger().warning('Failed deleting object %s (%d): %s' % (\n obj.object_name, 
res.status_code, res.text))\n return False\n return True\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n is_stream = kwargs.get('stream', True)\n url = ''.join((container_name, object_name.lstrip('/')))\n return self._HttpSessionHandle(url, is_stream, container_name, object_name)\n\n def _get_download_object(self, obj):\n # bypass for session result\n if not isinstance(obj, self._HttpSessionHandle):\n return obj\n\n container = self._containers[obj.container_name]\n # set stream flag before we send the request\n container.session.stream = obj.is_stream\n res = container.session.get(obj.url, timeout=self.timeout, headers=container.get_headers(obj.url))\n if res.status_code != requests.codes.ok:\n raise ValueError('Failed getting object %s (%d): %s' % (obj.object_name, res.status_code, res.text))\n return res\n\n def download_object_as_stream(self, obj, chunk_size=64 * 1024, **_):\n # return iterable object\n obj = self._get_download_object(obj)\n return obj.iter_content(chunk_size=chunk_size)\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n obj = self._get_download_object(obj)\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n length = 0\n with p.open(mode='wb') as f:\n for chunk in obj.iter_content(chunk_size=5 * 1024 * 1024):\n # filter out keep-alive new chunks\n if not chunk:\n continue\n chunk_size = len(chunk)\n f.write(chunk)\n length += chunk_size\n if callback:\n callback(chunk_size)\n\n return length\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n def test_upload(self, test_path, config, **kwargs):\n return True\n\n def upload_object(self, file_path, container, object_name, extra, callback=None, **kwargs):\n with open(file_path, 'rb') as stream:\n return self.upload_object_via_stream(iterator=stream, container=container,\n object_name=object_name, extra=extra, callback=callback, **kwargs)\n\n\nclass _Stream(object):\n encoding = None\n mode = 'rw'\n name = ''\n newlines = '\\n'\n softspace = False\n\n def __init__(self, input_iterator=None):\n self.closed = False\n self._buffer = Queue()\n self._input_iterator = input_iterator\n self._leftover = None\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return self.next()\n\n def close(self):\n self.closed = True\n\n def flush(self):\n pass\n\n def fileno(self):\n return 87\n\n def isatty(self):\n return False\n\n def next(self):\n while not self.closed or not self._buffer.empty():\n # input stream\n if self._input_iterator:\n try:\n chunck = next(self._input_iterator)\n return chunck\n except StopIteration:\n self.closed = True\n raise StopIteration()\n except Exception as ex:\n _Driver.get_logger().error('Failed downloading: %s' % ex)\n else:\n # in/out stream\n try:\n return self._buffer.get(block=True, timeout=1.)\n except Empty:\n pass\n\n raise StopIteration()\n\n def read(self, size=None):\n try:\n data = self.next() if self._leftover is None else self._leftover\n except StopIteration:\n return six.b('')\n\n self._leftover = None\n try:\n while size is None or not data or len(data) < size:\n chunk = self.next()\n if chunk is not None:\n if data is not None:\n data += chunk\n else:\n data = chunk\n except StopIteration:\n pass\n\n if size is not None and data and len(data) > size:\n self._leftover = data[size:]\n return data[:size]\n\n return data\n\n def 
readline(self, size=None):\n return self.read(size)\n\n def readlines(self, sizehint=None):\n pass\n\n def truncate(self, size=None):\n pass\n\n def write(self, bytes):\n self._buffer.put(bytes, block=True)\n\n def writelines(self, sequence):\n for s in sequence:\n self.write(s)\n\n\nclass _Boto3Driver(_Driver):\n \"\"\" Boto3 storage adapter (simple, enough for now) \"\"\"\n\n _min_pool_connections = 512\n _max_multipart_concurrency = deferred_config('aws.boto3.max_multipart_concurrency', 16)\n _pool_connections = deferred_config('aws.boto3.pool_connections', 512)\n\n _stream_download_pool_connections = 128\n _stream_download_pool = None\n\n _containers = {}\n\n scheme = 's3'\n scheme_prefix = str(furl(scheme=scheme, netloc=''))\n\n _bucket_location_failure_reported = set()\n\n class _Container(object):\n _creation_lock = threading.Lock()\n\n def __init__(self, name, cfg):\n try:\n import boto3\n import botocore.client\n from botocore.exceptions import ClientError # noqa: F401\n except ImportError:\n raise UsageError(\n 'AWS S3 storage driver (boto3) not found. '\n 'Please install driver using: pip install \\\"boto3>=1.9\\\"'\n )\n\n # skip 's3://'\n self.name = name[5:]\n endpoint = (('https://' if cfg.secure else 'http://') + cfg.host) if cfg.host else None\n\n # boto3 client creation isn't thread-safe (client itself is)\n with self._creation_lock:\n boto_kwargs = {\n \"endpoint_url\": endpoint,\n \"use_ssl\": cfg.secure,\n \"verify\": cfg.verify,\n \"config\": botocore.client.Config(\n max_pool_connections=max(\n _Boto3Driver._min_pool_connections,\n _Boto3Driver._pool_connections)\n )\n }\n if not cfg.use_credentials_chain:\n boto_kwargs[\"aws_access_key_id\"] = cfg.key\n boto_kwargs[\"aws_secret_access_key\"] = cfg.secret\n\n self.resource = boto3.resource(\n 's3',\n **boto_kwargs\n )\n\n self.config = cfg\n bucket_name = self.name[len(cfg.host) + 1:] if cfg.host else self.name\n self.bucket = self.resource.Bucket(bucket_name)\n\n @attrs\n class ListResult(object):\n name = attrib(default=None)\n\n def __init__(self):\n pass\n\n def _get_stream_download_pool(self):\n if self._stream_download_pool is None:\n self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections)\n return self._stream_download_pool\n\n def get_container(self, container_name, config=None, **kwargs):\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, cfg=config)\n self._containers[container_name].config.retries = kwargs.get('retries', 5)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):\n import boto3.s3.transfer\n stream = _Stream(iterator)\n try:\n container.bucket.upload_fileobj(stream, object_name, Config=boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n num_download_attempts=container.config.retries),\n Callback=callback,\n )\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):\n import boto3.s3.transfer\n try:\n container.bucket.upload_file(file_path, object_name, Config=boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n 
num_download_attempts=container.config.retries),\n Callback=callback)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def list_container_objects(self, container, ex_prefix=None, **kwargs):\n if ex_prefix:\n res = container.bucket.objects.filter(Prefix=ex_prefix)\n else:\n res = container.bucket.objects.all()\n for res in res:\n yield self.ListResult(name=res.key)\n\n def delete_object(self, object, **kwargs):\n from botocore.exceptions import ClientError\n object.delete()\n try:\n # Try loading the file to verify deletion\n object.load()\n return False\n except ClientError as e:\n return int(e.response['Error']['Code']) == 404\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n full_container_name = 's3://' + container_name\n container = self._containers[full_container_name]\n obj = container.resource.Object(container.bucket.name, object_name)\n obj.container_name = full_container_name\n return obj\n\n def download_object_as_stream(self, obj, chunk_size=64 * 1024, verbose=None, log=None, **_):\n def async_download(a_obj, a_stream, cb, cfg):\n try:\n a_obj.download_fileobj(a_stream, Callback=cb, Config=cfg)\n except Exception as ex:\n (log or self.get_logger()).error('Failed downloading: %s' % ex)\n a_stream.close()\n\n import boto3.s3.transfer\n # return iterable object\n stream = _Stream()\n container = self._containers[obj.container_name]\n config = boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n num_download_attempts=container.config.retries)\n total_size_mb = obj.content_length / (1024. * 1024.)\n remote_path = os.path.join(obj.container_name, obj.key)\n cb = DownloadProgressReport(total_size_mb, verbose, remote_path, log)\n self._get_stream_download_pool().submit(async_download, obj, stream, cb, config)\n\n return stream\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n import boto3.s3.transfer\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n container = self._containers[obj.container_name]\n obj.download_file(str(p),\n Callback=callback,\n Config=boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n num_download_attempts=container.config.retries))\n\n @classmethod\n def _test_bucket_config(cls, conf, log, test_path='', raise_on_error=True, log_on_error=True):\n try:\n import boto3\n from botocore.exceptions import ClientError\n except ImportError:\n return False\n\n if not conf.bucket:\n return False\n try:\n if not conf.is_valid():\n raise Exception('Missing credentials')\n\n fullname = furl(conf.bucket).add(path=test_path).add(path='%s-upload_test' % cls.__module__)\n bucket_name = str(fullname.path.segments[0])\n filename = str(furl(path=fullname.path.segments[1:]))\n\n data = {\n 'user': getpass.getuser(),\n 'machine': gethostname(),\n 'time': datetime.utcnow().isoformat()\n }\n\n boto_session = boto3.Session(conf.key, conf.secret)\n boto_resource = boto_session.resource('s3', conf.region)\n bucket = boto_resource.Bucket(bucket_name)\n bucket.put_object(Key=filename, Body=six.b(json.dumps(data)))\n\n region = cls._get_bucket_region(conf=conf, log=log, 
report_info=True)\n\n if region and ((conf.region and region != conf.region) or (not conf.region and region != 'us-east-1')):\n msg = \"incorrect region specified for bucket %s (detected region %s)\" % (conf.bucket, region)\n else:\n return True\n\n except ClientError as ex:\n msg = ex.response['Error']['Message']\n if log_on_error and log:\n log.error(msg)\n\n if raise_on_error:\n raise\n\n except Exception as ex:\n msg = str(ex)\n if log_on_error and log:\n log.error(msg)\n\n if raise_on_error:\n raise\n\n msg = (\"Failed testing access to bucket %s: \" % conf.bucket) + msg\n\n if log_on_error and log:\n log.error(msg)\n\n if raise_on_error:\n raise StorageError(msg)\n\n return False\n\n @classmethod\n def _get_bucket_region(cls, conf, log=None, report_info=False):\n import boto3\n from botocore.exceptions import ClientError\n\n if not conf.bucket:\n return None\n\n def report(msg):\n if log and conf.get_bucket_host() not in cls._bucket_location_failure_reported:\n if report_info:\n log.debug(msg)\n else:\n log.warning(msg)\n cls._bucket_location_failure_reported.add(conf.get_bucket_host())\n\n try:\n boto_session = boto3.Session(conf.key, conf.secret)\n boto_resource = boto_session.resource('s3')\n return boto_resource.meta.client.get_bucket_location(Bucket=conf.bucket)[\"LocationConstraint\"]\n\n except ClientError as ex:\n report(\"Failed getting bucket location (region) for bucket \"\n \"%s: %s (%s, access_key=%s). Default region will be used. \"\n \"This is normal if you do not have GET_BUCKET_LOCATION permission\"\n % (conf.bucket, ex.response['Error']['Message'], ex.response['Error']['Code'], conf.key))\n except Exception as ex:\n report(\"Failed getting bucket location (region) for bucket %s: %s. Default region will be used.\"\n % (conf.bucket, str(ex)))\n\n return None\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n def test_upload(self, test_path, config, **_):\n return True\n\n\nclass _GoogleCloudStorageDriver(_Driver):\n \"\"\"Storage driver for google cloud storage\"\"\"\n\n _stream_download_pool_connections = 128\n _stream_download_pool = None\n\n _containers = {}\n\n scheme = 'gs'\n scheme_prefix = str(furl(scheme=scheme, netloc=''))\n\n class _Container(object):\n def __init__(self, name, cfg):\n try:\n from google.cloud import storage\n from google.oauth2 import service_account\n except ImportError:\n raise UsageError(\n 'Google cloud driver not found. 
'\n 'Please install driver using: pip install \\\"google-cloud-storage>=1.13.2\\\"'\n )\n\n self.name = name[len(_GoogleCloudStorageDriver.scheme_prefix):]\n\n if cfg.credentials_json:\n credentials = service_account.Credentials.from_service_account_file(cfg.credentials_json)\n else:\n credentials = None\n\n self.client = storage.Client(project=cfg.project, credentials=credentials)\n for adapter in self.client._http.adapters.values():\n if cfg.pool_connections:\n adapter._pool_connections = cfg.pool_connections\n if cfg.pool_maxsize:\n adapter._pool_maxsize = cfg.pool_maxsize\n\n self.config = cfg\n self.bucket = self.client.bucket(self.name)\n\n def _get_stream_download_pool(self):\n if self._stream_download_pool is None:\n self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections)\n return self._stream_download_pool\n\n def get_container(self, container_name, config=None, **kwargs):\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, cfg=config)\n self._containers[container_name].config.retries = kwargs.get('retries', 5)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):\n try:\n blob = container.bucket.blob(object_name)\n blob.upload_from_file(iterator)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def upload_object(self, file_path, container, object_name, extra=None, **kwargs):\n try:\n blob = container.bucket.blob(object_name)\n blob.upload_from_filename(file_path)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def list_container_objects(self, container, **kwargs):\n return list(container.bucket.list_blobs())\n\n def delete_object(self, object, **kwargs):\n try:\n object.delete()\n except Exception as ex:\n try:\n from google.cloud.exceptions import NotFound\n if isinstance(ex, NotFound):\n return False\n except ImportError:\n pass\n name = getattr(object, \"name\", \"\")\n self.get_logger().warning(\"Failed deleting object {}: {}\".format(name, ex))\n return False\n\n return not object.exists()\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n full_container_name = str(furl(scheme=self.scheme, netloc=container_name))\n container = self._containers[full_container_name]\n obj = container.bucket.blob(object_name)\n obj.container_name = full_container_name\n return obj\n\n def download_object_as_stream(self, obj, chunk_size=256 * 1024, **_):\n raise NotImplementedError('Unsupported for google storage')\n\n def async_download(a_obj, a_stream):\n try:\n a_obj.download_to_file(a_stream)\n except Exception as ex:\n self.get_logger().error('Failed downloading: %s' % ex)\n a_stream.close()\n\n # return iterable object\n stream = _Stream()\n obj.chunk_size = chunk_size\n self._get_stream_download_pool().submit(async_download, obj, stream)\n\n return stream\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n obj.download_to_filename(str(p))\n\n def test_upload(self, test_path, config, **_):\n bucket_url = str(furl(scheme=self.scheme, netloc=config.bucket, path=config.subdir))\n bucket = 
self.get_container(container_name=bucket_url, config=config).bucket\n\n test_obj = bucket\n\n if test_path:\n if not test_path.endswith('/'):\n test_path += '/'\n\n blob = bucket.blob(test_path)\n\n if blob.exists():\n test_obj = blob\n\n permissions_to_test = ('storage.objects.get', 'storage.objects.update')\n return set(test_obj.test_iam_permissions(permissions_to_test)) == set(permissions_to_test)\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n\nclass _AzureBlobServiceStorageDriver(_Driver):\n scheme = 'azure'\n\n _containers = {}\n\n class _Container(object):\n def __init__(self, name, config):\n try:\n from azure.common import AzureHttpError # noqa: F401\n from azure.storage.blob import BlockBlobService\n except ImportError:\n raise UsageError(\n 'Azure blob storage driver not found. '\n 'Please install driver using: pip install \\\"azure.storage.blob<=2.1.0\\\"'\n )\n\n self.name = name\n self.config = config\n self.blob_service = BlockBlobService(\n account_name=config.account_name,\n account_key=config.account_key,\n )\n\n @attrs\n class _Object(object):\n container = attrib()\n blob_name = attrib()\n content_length = attrib()\n\n def get_container(self, container_name=None, config=None, **kwargs):\n container_name = container_name or config.container_name\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, config=config)\n # self._containers[container_name].config.retries = kwargs.get('retries', 5)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):\n from azure.common import AzureHttpError # noqa\n\n blob_name = self._blob_name_from_object_path(object_name, container.name) # noqa: F841\n try:\n container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024\n container.blob_service.socket_timeout = (300, 2000)\n container.blob_service.create_blob_from_bytes(\n container.name,\n object_name,\n iterator.read() if hasattr(iterator, \"read\") else bytes(iterator),\n # timeout=300,\n max_connections=2,\n progress_callback=callback,\n )\n return True\n except AzureHttpError as ex:\n self.get_logger().error('Failed uploading (Azure error): %s' % ex)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n\n def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):\n from azure.common import AzureHttpError # noqa\n\n blob_name = self._blob_name_from_object_path(object_name, container.name)\n stream = None\n try:\n from azure.storage.blob import ContentSettings # noqa\n from mimetypes import guess_type\n container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024\n container.blob_service.socket_timeout = (300, 2000)\n container.blob_service.create_blob_from_path(\n container.name,\n blob_name,\n file_path,\n # timeout=300,\n max_connections=2,\n content_settings=ContentSettings(content_type=guess_type(file_path)),\n progress_callback=callback,\n )\n return True\n except AzureHttpError as ex:\n self.get_logger().error('Failed uploading (Azure error): %s' % ex)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n finally:\n if stream:\n stream.close()\n\n def list_container_objects(self, container, ex_prefix=None, **kwargs):\n return list(container.blob_service.list_blobs(container_name=container.name, prefix=ex_prefix))\n\n def delete_object(self, object, **kwargs):\n container = 
object.container\n container.blob_service.delete_blob(\n container.name,\n object.blob_name,\n )\n return not object.container.blob_service.exists(container.name, object.blob_name)\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n container = self._containers.get(container_name)\n if not container:\n raise StorageError(\"Container `{}` not found for object {}\".format(container_name, object_name))\n\n # blob_name = self._blob_name_from_object_path(object_name, container_name)\n blob = container.blob_service.get_blob_properties(container.name, object_name)\n\n return self._Object(container=container, blob_name=blob.name, content_length=blob.properties.content_length)\n\n def download_object_as_stream(self, obj, verbose, *_, **__):\n container = obj.container\n total_size_mb = obj.content_length / (1024. * 1024.)\n remote_path = os.path.join(\n \"{}://\".format(self.scheme),\n container.config.account_name,\n container.name,\n obj.blob_name\n )\n cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self.get_logger())\n blob = container.blob_service.get_blob_to_bytes(\n container.name,\n obj.blob_name,\n progress_callback=cb,\n )\n return blob.content\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n\n download_done = threading.Event()\n download_done.counter = 0\n\n def callback_func(current, total):\n if callback:\n chunk = current - download_done.counter\n download_done.counter += chunk\n callback(chunk)\n if current >= total:\n download_done.set()\n\n container = obj.container\n container.blob_service.MAX_SINGLE_GET_SIZE = 5 * 1024 * 1024\n _ = container.blob_service.get_blob_to_path(\n container.name,\n obj.blob_name,\n local_path,\n max_connections=10,\n progress_callback=callback_func,\n )\n download_done.wait()\n\n def test_upload(self, test_path, config, **_):\n container = self.get_container(config=config)\n try:\n container.blob_service.get_container_properties(container.name)\n except Exception:\n return False\n else:\n # Using the account Key, we can always upload...\n return True\n\n @classmethod\n def _blob_name_from_object_path(cls, name, container_name):\n scheme = urlparse(name).scheme\n if scheme:\n if scheme != cls.scheme:\n raise StorageError(\n \"When using a URL, only the `{}` scheme is supported for Azure storage: {}\",\n cls.scheme,\n name,\n )\n\n f = furl(name)\n\n if not f.path.segments:\n raise StorageError(\n \"Missing container name in URL {}\",\n name,\n )\n\n parsed_container_name = f.path.segments[0]\n\n if parsed_container_name != container_name:\n raise StorageError(\n \"Container name mismatch (expected {}, found {}) in {}\",\n container_name,\n parsed_container_name,\n name,\n )\n\n if len(f.path.segments) == 1:\n raise StorageError(\n \"No path found following container name {} in {}\",\n container_name,\n name,\n )\n\n return f.path.segments[0], os.path.join(*f.path.segments[1:])\n\n return name\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n\nclass _FileStorageDriver(_Driver):\n \"\"\"\n A base StorageDriver to derive from.\n \"\"\"\n\n scheme = \"file\"\n CHUNK_SIZE = 8096\n IGNORE_FOLDERS = ['.lock', '.hash']\n Object = namedtuple(\"Object\", ['name', 'size', 'extra', 'driver', 'container', 'hash', 'meta_data'])\n\n class _Container(object):\n 
def __init__(self, name, extra, driver):\n self.name = name\n self.extra = extra\n self.driver = driver\n\n def __init__(self, key, secret=None, secure=True, host=None, port=None,\n **kwargs):\n\n # Use the key as the path to the storage\n self.base_path = key\n\n def _make_path(self, path, ignore_existing=True):\n \"\"\"\n Create a path by checking if it already exists\n \"\"\"\n\n try:\n os.makedirs(path)\n except OSError:\n exp = sys.exc_info()[1]\n if exp.errno == errno.EEXIST and not ignore_existing:\n raise exp\n\n def _check_container_name(self, container_name):\n \"\"\"\n Check if the container name is valid\n\n :param container_name: Container name\n :type container_name: ``str``\n \"\"\"\n\n if '/' in container_name or '\\\\' in container_name:\n raise ValueError(\"Container name \\\"{}\\\" cannot contain \\\\ or / \".format(container_name))\n\n def _make_container(self, container_name):\n \"\"\"\n Create a container instance\n\n :param container_name: Container name.\n :type container_name: ``str``\n\n :return: A Container instance.\n \"\"\"\n container_name = container_name or '.'\n self._check_container_name(container_name)\n\n full_path = os.path.realpath(os.path.join(self.base_path, container_name))\n\n try:\n stat = os.stat(full_path)\n if not os.path.isdir(full_path):\n raise OSError(\"Target path \\\"{}\\\" is not a directory\".format(full_path))\n except OSError:\n raise OSError(\"Target path \\\"{}\\\" is not accessible or does not exist\".format(full_path))\n\n extra = {\n 'creation_time': stat.st_ctime,\n 'access_time': stat.st_atime,\n 'modify_time': stat.st_mtime,\n }\n\n return self._Container(name=container_name, extra=extra, driver=self)\n\n def _make_object(self, container, object_name):\n \"\"\"\n Create an object instance\n\n :param container: Container.\n :type container: :class:`Container`\n\n :param object_name: Object name.\n :type object_name: ``str``\n\n :return: A Object instance.\n \"\"\"\n\n full_path = os.path.realpath(os.path.join(self.base_path, container.name if container else '.', object_name))\n\n if os.path.isdir(full_path):\n raise ValueError(\"Target path \\\"{}\\\" already exist\".format(full_path))\n\n try:\n stat = os.stat(full_path)\n except Exception:\n raise ValueError(\"Cannot access target path \\\"{}\\\"\".format(full_path))\n\n extra = {\n 'creation_time': stat.st_ctime,\n 'access_time': stat.st_atime,\n 'modify_time': stat.st_mtime,\n }\n\n return self.Object(name=object_name, size=stat.st_size, extra=extra,\n driver=self, container=container, hash=None, meta_data=None)\n\n def iterate_containers(self):\n \"\"\"\n Return a generator of containers.\n\n :return: A generator of Container instances.\n \"\"\"\n\n for container_name in os.listdir(self.base_path):\n full_path = os.path.join(self.base_path, container_name)\n if not os.path.isdir(full_path):\n continue\n yield self._make_container(container_name)\n\n def _get_objects(self, container):\n \"\"\"\n Recursively iterate through the file-system and return the object names\n \"\"\"\n\n cpath = self.get_container_cdn_url(container, check=True)\n\n for folder, subfolders, files in os.walk(cpath, topdown=True):\n # Remove unwanted subfolders\n for subf in self.IGNORE_FOLDERS:\n if subf in subfolders:\n subfolders.remove(subf)\n\n for name in files:\n full_path = os.path.join(folder, name)\n object_name = os.path.relpath(full_path, start=cpath)\n yield self._make_object(container, object_name)\n\n def iterate_container_objects(self, container):\n \"\"\"\n Returns a generator of 
objects for the given container.\n\n :param container: Container instance\n :type container: :class:`Container`\n\n :return: A generator of Object instances.\n \"\"\"\n\n return self._get_objects(container)\n\n def get_container(self, container_name, **_):\n \"\"\"\n Return a container instance.\n\n :param container_name: Container name.\n :type container_name: ``str``\n\n :return: A Container instance.\n \"\"\"\n return self._make_container(container_name)\n\n def get_container_cdn_url(self, container, check=False):\n \"\"\"\n Return a container CDN URL.\n\n :param container: Container instance\n :type container: :class:`Container`\n\n :param check: Indicates if the path's existence must be checked\n :type check: ``bool``\n\n :return: A CDN URL for this container.\n \"\"\"\n path = os.path.realpath(os.path.join(self.base_path, container.name if container else '.'))\n\n if check and not os.path.isdir(path):\n raise ValueError(\"Target path \\\"{}\\\" does not exist\".format(path))\n\n return path\n\n def get_object(self, container_name, object_name, **_):\n \"\"\"\n Return an object instance.\n\n :param container_name: Container name.\n :type container_name: ``str``\n\n :param object_name: Object name.\n :type object_name: ``str``\n\n :return: An Object instance.\n \"\"\"\n container = self._make_container(container_name)\n return self._make_object(container, object_name)\n\n def get_object_cdn_url(self, obj):\n \"\"\"\n Return an object CDN URL.\n\n :param obj: Object instance\n :type obj: :class:`Object`\n\n :return: A CDN URL for this object.\n \"\"\"\n return os.path.realpath(os.path.join(self.base_path, obj.container.name, obj.name))\n\n def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True, **_):\n \"\"\"\n Download an object to the specified destination path.\n\n :param obj: Object instance.\n :type obj: :class:`Object`\n\n :param destination_path: Full path to a file or a directory where the\n incoming file will be saved.\n :type destination_path: ``str``\n\n :param overwrite_existing: True to overwrite an existing file,\n defaults to False.\n :type overwrite_existing: ``bool``\n\n :param delete_on_failure: True to delete a partially downloaded file if\n the download was not successful (hash mismatch / file size).\n :type delete_on_failure: ``bool``\n\n :return: True, if an object has been successfully downloaded, False, otherwise.\n \"\"\"\n\n obj_path = self.get_object_cdn_url(obj)\n base_name = os.path.basename(destination_path)\n\n if not base_name and not os.path.exists(destination_path):\n raise ValueError('Path \\\"{}\\\" does not exist'.format(destination_path))\n\n if not base_name:\n file_path = os.path.join(destination_path, obj.name)\n else:\n file_path = destination_path\n\n if os.path.exists(file_path) and not overwrite_existing:\n raise ValueError('File \\\"{}\\\" already exists, but overwrite_existing=False'.format(file_path))\n\n try:\n shutil.copy(obj_path, file_path)\n except IOError:\n if delete_on_failure:\n # noinspection PyBroadException\n try:\n os.unlink(file_path)\n except Exception:\n pass\n return False\n\n return True\n\n def download_object_as_stream(self, obj, chunk_size=None, **_):\n \"\"\"\n Return a generator which yields object data.\n\n :param obj: Object instance\n :type obj: :class:`Object`\n\n :param chunk_size: Optional chunk size (in bytes).\n :type chunk_size: ``int``\n\n :return: A stream of binary chunks of data.\n \"\"\"\n path = self.get_object_cdn_url(obj)\n with open(path, 'rb') as 
obj_file:\n for data in self._read_in_chunks(obj_file, chunk_size=chunk_size):\n yield data\n\n def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, **_):\n \"\"\"\n Upload an object currently located on a disk.\n\n :param file_path: Path to the object on disk.\n :type file_path: ``str``\n\n :param container: Destination container.\n :type container: :class:`Container`\n\n :param object_name: Object name.\n :type object_name: ``str``\n\n :param verify_hash: Verify hast\n :type verify_hash: ``bool``\n\n :param extra: (optional) Extra attributes (driver specific).\n :type extra: ``dict``\n \"\"\"\n\n path = self.get_container_cdn_url(container, check=True)\n obj_path = os.path.join(path, object_name)\n base_path = os.path.dirname(obj_path)\n\n self._make_path(base_path)\n\n shutil.copy(file_path, obj_path)\n\n os.chmod(obj_path, int('664', 8))\n\n return self._make_object(container, object_name)\n\n def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):\n \"\"\"\n Upload an object using an iterator.\n\n If a provider supports it, chunked transfer encoding is used and you\n don't need to know in advance the amount of data to be uploaded.\n\n Otherwise if a provider doesn't support it, iterator will be exhausted\n so a total size for data to be uploaded can be determined.\n\n Note: Exhausting the iterator means that the whole data must be\n buffered in memory which might result in memory exhausting when\n uploading a very large object.\n\n If a file is located on a disk you are advised to use upload_object\n function which uses fs.stat function to determine the file size and it\n doesn't need to buffer whole object in the memory.\n\n :type iterator: ``object``\n :param iterator: An object which implements the iterator\n interface and yields binary chunks of data.\n\n :type container: :class:`Container`\n :param container: Destination container.\n\n :type object_name: ``str``\n :param object_name: Object name.\n\n :type extra: ``dict``\n :param extra: (optional) Extra attributes (driver specific). 
Note:\n This dictionary must contain a 'content_type' key which represents\n a content type of the stored object.\n \"\"\"\n path = self.get_container_cdn_url(container, check=True)\n obj_path = os.path.join(path, object_name)\n base_path = os.path.dirname(obj_path)\n self._make_path(base_path)\n\n obj_path = os.path.realpath(obj_path)\n with open(obj_path, 'wb' if not isinstance(iterator, StringIO) else 'wt') as obj_file:\n obj_file.write(iterator.read() if hasattr(iterator, 'read') else bytes(iterator))\n\n os.chmod(obj_path, int('664', 8))\n return self._make_object(container, object_name)\n\n def delete_object(self, obj, **_):\n \"\"\"\n Delete an object.\n\n :type obj: :class:`Object`\n :param obj: Object instance.\n\n :return: True on success.\n \"\"\"\n\n path = self.get_object_cdn_url(obj)\n\n try:\n os.unlink(path)\n except Exception:\n return False\n\n # # Check and delete all the empty parent folders\n # path = os.path.dirname(path)\n # container_url = obj.container.get_cdn_url()\n #\n # # Delete the empty parent folders till the container's level\n # while path != container_url:\n # try:\n # os.rmdir(path)\n # except OSError:\n # exp = sys.exc_info()[1]\n # if exp.errno == errno.ENOTEMPTY:\n # break\n # raise exp\n #\n # path = os.path.dirname(path)\n\n return True\n\n def create_container(self, container_name):\n \"\"\"\n Create a new container.\n\n :type container_name: ``str``\n :param container_name: Container name.\n\n :return: A Container instance on success.\n \"\"\"\n container_name = container_name or '.'\n self._check_container_name(container_name)\n\n path = os.path.join(self.base_path, container_name)\n\n try:\n self._make_path(path, ignore_existing=False)\n except OSError:\n exp = sys.exc_info()[1]\n if exp.errno == errno.EEXIST:\n raise ValueError('Container \\\"{}\\\" with this name already exists. 
The name '\n 'must be unique among all the containers in the '\n 'system'.format(container_name))\n else:\n raise ValueError('Error creating container \\\"{}\\\"'.format(container_name))\n except Exception:\n raise ValueError('Error creating container \\\"{}\\\"'.format(container_name))\n\n return self._make_container(container_name)\n\n def delete_container(self, container):\n \"\"\"\n Delete a container.\n\n :type container: :class:`Container`\n :param container: Container instance\n\n :return: True on success, False otherwise.\n \"\"\"\n\n # Check if there are any objects inside this\n for obj in self._get_objects(container):\n raise ValueError('Container \\\"{}\\\" is not empty'.format(container.name))\n\n path = self.get_container_cdn_url(container, check=True)\n\n # noinspection PyBroadException\n try:\n shutil.rmtree(path)\n except Exception:\n return False\n\n return True\n\n def list_container_objects(self, container, **kwargs):\n return list(self.iterate_container_objects(container))\n\n @staticmethod\n def _read_in_chunks(iterator, chunk_size=None, fill_size=False, yield_empty=False):\n \"\"\"\n Return a generator which yields data in chunks.\n\n :param iterator: An object which implements an iterator interface\n or a File like object with read method.\n :type iterator: :class:`object` which implements iterator interface.\n\n :param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)\n :type chunk_size: ``int``\n\n :param fill_size: If True, make sure chunks are exactly chunk_size in\n length (except for last chunk).\n :type fill_size: ``bool``\n\n :param yield_empty: If true and iterator returned no data, only yield empty\n bytes object\n :type yield_empty: ``bool``\n\n TODO: At some point in the future we could use byte arrays here if version\n >= Python 3. This should speed things up a bit and reduce memory usage.\n \"\"\"\n chunk_size = chunk_size or _FileStorageDriver.CHUNK_SIZE\n if six.PY3:\n from io import FileIO as file\n\n if isinstance(iterator, (file)):\n get_data = iterator.read\n args = (chunk_size,)\n else:\n get_data = next\n args = (iterator,)\n\n data = bytes('')\n empty = False\n\n while not empty or len(data) > 0:\n if not empty:\n try:\n chunk = bytes(get_data(*args))\n if len(chunk) > 0:\n data += chunk\n else:\n empty = True\n except StopIteration:\n empty = True\n\n if len(data) == 0:\n if empty and yield_empty:\n yield bytes('')\n\n return\n\n if fill_size:\n if empty or len(data) >= chunk_size:\n yield data[:chunk_size]\n data = data[chunk_size:]\n else:\n yield data\n data = bytes('')\n\n def get_direct_access(self, remote_path, **_):\n # this will always make sure we have full path and file:// prefix\n full_url = StorageHelper.conform_url(remote_path)\n # now get rid of the file:// prefix\n path = Path(full_url[7:])\n if not path.exists():\n raise ValueError(\"Requested path does not exist: {}\".format(path))\n return path.as_posix()\n\n def test_upload(self, test_path, config, **kwargs):\n return True\n\n\ndriver_schemes = set(\n filter(\n None,\n itertools.chain(\n (getattr(cls, \"scheme\", None) for cls in _Driver.__subclasses__()),\n *(getattr(cls, \"schemes\", []) for cls in _Driver.__subclasses__())\n )\n )\n)\n\nremote_driver_schemes = driver_schemes - {_FileStorageDriver.scheme}\n" ]
[ [ "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
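The StorageHelper record above documents a path-substitution mechanism (add_path_substitution together with _apply_url_substitutions): the first rule whose registered_prefix matches the start of a URL rewrites that prefix to local_prefix, and may then normalize the remaining path separators. The snippet below is a minimal, self-contained Python sketch of that rule logic only; the names PathSubstitutionRule and apply_substitutions are illustrative stand-ins and are not part of the recorded code.

import os
from collections import namedtuple

# Illustrative stand-in for the substitution rule described in the record above.
PathSubstitutionRule = namedtuple(
    "PathSubstitutionRule",
    ["registered_prefix", "local_prefix", "replace_windows_sep", "replace_linux_sep"],
)

def apply_substitutions(url, rules):
    # Apply the first matching rule only, mirroring the recorded behaviour.
    for rule in rules:
        if not url.startswith(rule.registered_prefix):
            continue
        # Replace only the first occurrence of the registered prefix.
        url = url.replace(rule.registered_prefix, rule.local_prefix, 1)
        start = len(rule.local_prefix)
        # Optionally rewrite separators in the part that follows the local prefix.
        if rule.replace_windows_sep:
            url = url[:start] + url[start:].replace("\\", os.sep)
        if rule.replace_linux_sep:
            url = url[:start] + url[start:].replace("/", os.sep)
        break
    return url

# Example: data registered under an s3-style prefix that is now mounted locally.
rules = [PathSubstitutionRule("s3://old-bucket/data", "/mnt/share/data", False, True)]
print(apply_substitutions("s3://old-bucket/data/run1/model.bin", rules))
# On a POSIX machine os.sep is '/', so this prints /mnt/share/data/run1/model.bin

Prefix matching is case sensitive and at most one rule is applied per URL, which is consistent with the docstring recorded above.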
bwconrad/solo-learn
[ "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "ec510d803a4428d7d8803b90fa1484c42cb9cb52" ]
[ "downstream/tinypersons/mmdet/datasets/pipelines/formating.py", "downstream/tinypersons/mmdet/models/detectors/centernet.py", "downstream/tinypersons/mmdet/models/roi_heads/mask_heads/mask_point_head.py", "downstream/med-seg/pannuke_eval/run_breast.py", "solo/losses/vibcreg.py", "solo/methods/simclr.py", "downstream/tinypersons/mmdet/models/dense_heads/nasfcos_head.py", "downstream/tinypersons/mmdet/core/bbox/samplers/sampling_result.py" ]
[ "from collections.abc import Sequence\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.parallel import DataContainer as DC\n\nfrom ..builder import PIPELINES\n\n\ndef to_tensor(data):\n \"\"\"Convert objects of various python types to :obj:`torch.Tensor`.\n\n Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n :class:`Sequence`, :class:`int` and :class:`float`.\n\n Args:\n data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to\n be converted.\n \"\"\"\n\n if isinstance(data, torch.Tensor):\n return data\n elif isinstance(data, np.ndarray):\n return torch.from_numpy(data)\n elif isinstance(data, Sequence) and not mmcv.is_str(data):\n return torch.tensor(data)\n elif isinstance(data, int):\n return torch.LongTensor([data])\n elif isinstance(data, float):\n return torch.FloatTensor([data])\n else:\n raise TypeError(f'type {type(data)} cannot be converted to tensor.')\n\n\[email protected]_module()\nclass ToTensor:\n \"\"\"Convert some results to :obj:`torch.Tensor` by given keys.\n\n Args:\n keys (Sequence[str]): Keys that need to be converted to Tensor.\n \"\"\"\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n \"\"\"Call function to convert data in results to :obj:`torch.Tensor`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted\n to :obj:`torch.Tensor`.\n \"\"\"\n for key in self.keys:\n results[key] = to_tensor(results[key])\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass ImageToTensor:\n \"\"\"Convert image to :obj:`torch.Tensor` by given keys.\n\n The dimension order of input image is (H, W, C). The pipeline will convert\n it to (C, H, W). 
If only 2 dimension (H, W) is given, the output would be\n (1, H, W).\n\n Args:\n keys (Sequence[str]): Key of images to be converted to Tensor.\n \"\"\"\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n \"\"\"Call function to convert image in results to :obj:`torch.Tensor` and\n transpose the channel order.\n\n Args:\n results (dict): Result dict contains the image data to convert.\n\n Returns:\n dict: The result dict contains the image converted\n to :obj:`torch.Tensor` and transposed to (C, H, W) order.\n \"\"\"\n for key in self.keys:\n img = results[key]\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n results[key] = to_tensor(img.transpose(2, 0, 1))\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass Transpose:\n \"\"\"Transpose some results by given keys.\n\n Args:\n keys (Sequence[str]): Keys of results to be transposed.\n order (Sequence[int]): Order of transpose.\n \"\"\"\n\n def __init__(self, keys, order):\n self.keys = keys\n self.order = order\n\n def __call__(self, results):\n \"\"\"Call function to transpose the channel order of data in results.\n\n Args:\n results (dict): Result dict contains the data to transpose.\n\n Returns:\n dict: The result dict contains the data transposed to \\\n ``self.order``.\n \"\"\"\n for key in self.keys:\n results[key] = results[key].transpose(self.order)\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, order={self.order})'\n\n\[email protected]_module()\nclass ToDataContainer:\n \"\"\"Convert results to :obj:`mmcv.DataContainer` by given fields.\n\n Args:\n fields (Sequence[dict]): Each field is a dict like\n ``dict(key='xxx', **kwargs)``. 
The ``key`` in result will\n be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.\n Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),\n dict(key='gt_labels'))``.\n \"\"\"\n\n def __init__(self,\n fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),\n dict(key='gt_labels'))):\n self.fields = fields\n\n def __call__(self, results):\n \"\"\"Call function to convert data in results to\n :obj:`mmcv.DataContainer`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted to \\\n :obj:`mmcv.DataContainer`.\n \"\"\"\n\n for field in self.fields:\n field = field.copy()\n key = field.pop('key')\n results[key] = DC(results[key], **field)\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(fields={self.fields})'\n\n\[email protected]_module()\nclass DefaultFormatBundle:\n \"\"\"Default formatting bundle.\n\n It simplifies the pipeline of formatting common fields, including \"img\",\n \"proposals\", \"gt_bboxes\", \"gt_labels\", \"gt_masks\" and \"gt_semantic_seg\".\n These fields are formatted as follows.\n\n - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)\n - proposals: (1)to tensor, (2)to DataContainer\n - gt_bboxes: (1)to tensor, (2)to DataContainer\n - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer\n - gt_labels: (1)to tensor, (2)to DataContainer\n - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)\n - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \\\n (3)to DataContainer (stack=True)\n \"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to transform and format common fields in results.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data that is formatted with \\\n default bundle.\n \"\"\"\n\n if 'img' in results:\n img = results['img']\n # add default meta keys\n results = self._add_default_meta_keys(results)\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n img = np.ascontiguousarray(img.transpose(2, 0, 1))\n results['img'] = DC(to_tensor(img), stack=True)\n for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:\n if key not in results:\n continue\n results[key] = DC(to_tensor(results[key]))\n if 'gt_masks' in results:\n results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)\n if 'gt_semantic_seg' in results:\n results['gt_semantic_seg'] = DC(\n to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)\n return results\n\n def _add_default_meta_keys(self, results):\n \"\"\"Add default meta keys.\n\n We set default meta keys including `pad_shape`, `scale_factor` and\n `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and\n `Pad` are implemented during the whole pipeline.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n results (dict): Updated result dict contains the data to convert.\n \"\"\"\n img = results['img']\n results.setdefault('pad_shape', img.shape)\n results.setdefault('scale_factor', 1.0)\n num_channels = 1 if len(img.shape) < 3 else img.shape[2]\n results.setdefault(\n 'img_norm_cfg',\n dict(\n mean=np.zeros(num_channels, dtype=np.float32),\n std=np.ones(num_channels, dtype=np.float32),\n to_rgb=False))\n return results\n\n def __repr__(self):\n return self.__class__.__name__\n\n\[email protected]_module()\nclass Collect:\n \"\"\"Collect data from the loader relevant to the specific task.\n\n This is usually the last stage of the data loader 
pipeline. Typically keys\n is set to some subset of \"img\", \"proposals\", \"gt_bboxes\",\n \"gt_bboxes_ignore\", \"gt_labels\", and/or \"gt_masks\".\n\n The \"img_meta\" item is always populated. The contents of the \"img_meta\"\n dictionary depends on \"meta_keys\". By default this includes:\n\n - \"img_shape\": shape of the image input to the network as a tuple \\\n (h, w, c). Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n\n - \"scale_factor\": a float indicating the preprocessing scale\n\n - \"flip\": a boolean indicating if image flip transform was used\n\n - \"filename\": path to the image file\n\n - \"ori_shape\": original shape of the image as a tuple (h, w, c)\n\n - \"pad_shape\": image shape after padding\n\n - \"img_norm_cfg\": a dict of normalization information:\n\n - mean - per channel mean subtraction\n - std - per channel std divisor\n - to_rgb - bool indicating if bgr was converted to rgb\n\n Args:\n keys (Sequence[str]): Keys of results to be collected in ``data``.\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',\n 'pad_shape', 'scale_factor', 'flip', 'flip_direction',\n 'img_norm_cfg')``\n \"\"\"\n\n def __init__(self,\n keys,\n meta_keys=('filename', 'ori_filename', 'ori_shape',\n 'img_shape', 'pad_shape', 'scale_factor', 'flip',\n 'flip_direction', 'img_norm_cfg')):\n self.keys = keys\n self.meta_keys = meta_keys\n\n def __call__(self, results):\n \"\"\"Call function to collect keys in results. The keys in ``meta_keys``\n will be converted to :obj:mmcv.DataContainer.\n\n Args:\n results (dict): Result dict contains the data to collect.\n\n Returns:\n dict: The result dict contains the following keys\n\n - keys in``self.keys``\n - ``img_metas``\n \"\"\"\n\n data = {}\n img_meta = {}\n for key in self.meta_keys:\n img_meta[key] = results[key]\n data['img_metas'] = DC(img_meta, cpu_only=True)\n for key in self.keys:\n data[key] = results[key]\n return data\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, meta_keys={self.meta_keys})'\n\n\[email protected]_module()\nclass WrapFieldsToLists:\n \"\"\"Wrap fields of the data dictionary into lists for evaluation.\n\n This class can be used as a last step of a test or validation\n pipeline for single image evaluation or inference.\n\n Example:\n >>> test_pipeline = [\n >>> dict(type='LoadImageFromFile'),\n >>> dict(type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n >>> dict(type='Pad', size_divisor=32),\n >>> dict(type='ImageToTensor', keys=['img']),\n >>> dict(type='Collect', keys=['img']),\n >>> dict(type='WrapFieldsToLists')\n >>> ]\n \"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to wrap fields into lists.\n\n Args:\n results (dict): Result dict contains the data to wrap.\n\n Returns:\n dict: The result dict where value of ``self.keys`` are wrapped \\\n into list.\n \"\"\"\n\n # Wrap dict fields into lists\n for key, val in results.items():\n results[key] = [val]\n return results\n\n def __repr__(self):\n return f'{self.__class__.__name__}()'\n", "import torch\n\nfrom mmdet.core import bbox2result\nfrom mmdet.models.builder import DETECTORS\nfrom ...core.utils import flip_tensor\nfrom .single_stage import SingleStageDetector\n\n\[email protected]_module()\nclass CenterNet(SingleStageDetector):\n \"\"\"Implementation 
of CenterNet(Objects as Points)\n\n <https://arxiv.org/abs/1904.07850>.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck,\n bbox_head,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg,\n test_cfg, pretrained, init_cfg)\n\n def merge_aug_results(self, aug_results, with_nms):\n \"\"\"Merge augmented detection bboxes and score.\n\n Args:\n aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each\n image.\n with_nms (bool): If True, do nms before return boxes.\n\n Returns:\n tuple: (out_bboxes, out_labels)\n \"\"\"\n recovered_bboxes, aug_labels = [], []\n for single_result in aug_results:\n recovered_bboxes.append(single_result[0][0])\n aug_labels.append(single_result[0][1])\n\n bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()\n labels = torch.cat(aug_labels).contiguous()\n if with_nms:\n out_bboxes, out_labels = self.bbox_head._bboxes_nms(\n bboxes, labels, self.bbox_head.test_cfg)\n else:\n out_bboxes, out_labels = bboxes, labels\n\n return out_bboxes, out_labels\n\n def aug_test(self, imgs, img_metas, rescale=True):\n \"\"\"Augment testing of CenterNet. Aug test must have flipped image pair,\n and unlike CornerNet, it will perform an averaging operation on the\n feature map instead of detecting bbox.\n\n Args:\n imgs (list[Tensor]): Augmented images.\n img_metas (list[list[dict]]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n rescale (bool): If True, return boxes in original image space.\n Default: True.\n\n Note:\n ``imgs`` must including flipped image pairs.\n\n Returns:\n list[list[np.ndarray]]: BBox results of each image and classes.\n The outer list corresponds to each image. The inner list\n corresponds to each class.\n \"\"\"\n img_inds = list(range(len(imgs)))\n assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (\n 'aug test must have flipped image pair')\n aug_results = []\n for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):\n flip_direction = img_metas[flip_ind][0]['flip_direction']\n img_pair = torch.cat([imgs[ind], imgs[flip_ind]])\n x = self.extract_feat(img_pair)\n center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x)\n assert len(center_heatmap_preds) == len(wh_preds) == len(\n offset_preds) == 1\n\n # Feature map averaging\n center_heatmap_preds[0] = (\n center_heatmap_preds[0][0:1] +\n flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2\n wh_preds[0] = (wh_preds[0][0:1] +\n flip_tensor(wh_preds[0][1:2], flip_direction)) / 2\n\n bbox_list = self.bbox_head.get_bboxes(\n center_heatmap_preds,\n wh_preds, [offset_preds[0][0:1]],\n img_metas[ind],\n rescale=rescale,\n with_nms=False)\n aug_results.append(bbox_list)\n\n nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)\n if nms_cfg is None:\n with_nms = False\n else:\n with_nms = True\n bbox_list = [self.merge_aug_results(aug_results, with_nms)]\n bbox_results = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n for det_bboxes, det_labels in bbox_list\n ]\n return bbox_results\n", "# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import point_sample, rel_roi_point_to_rel_img_point\nfrom mmcv.runner import BaseModule\n\nfrom mmdet.models.builder import HEADS, build_loss\n\n\[email protected]_module()\nclass MaskPointHead(BaseModule):\n \"\"\"A mask point head use in 
PointRend.\n\n ``MaskPointHead`` use shared multi-layer perceptron (equivalent to\n nn.Conv1d) to predict the logit of input points. The fine-grained feature\n and coarse feature will be concatenate together for predication.\n\n Args:\n num_fcs (int): Number of fc layers in the head. Default: 3.\n in_channels (int): Number of input channels. Default: 256.\n fc_channels (int): Number of fc channels. Default: 256.\n num_classes (int): Number of classes for logits. Default: 80.\n class_agnostic (bool): Whether use class agnostic classification.\n If so, the output channels of logits will be 1. Default: False.\n coarse_pred_each_layer (bool): Whether concatenate coarse feature with\n the output of each fc layer. Default: True.\n conv_cfg (dict | None): Dictionary to construct and config conv layer.\n Default: dict(type='Conv1d'))\n norm_cfg (dict | None): Dictionary to construct and config norm layer.\n Default: None.\n loss_point (dict): Dictionary to construct and config loss layer of\n point head. Default: dict(type='CrossEntropyLoss', use_mask=True,\n loss_weight=1.0).\n init_cfg (dict or list[dict], optional): Initialization config dict.\n \"\"\"\n\n def __init__(self,\n num_classes,\n num_fcs=3,\n in_channels=256,\n fc_channels=256,\n class_agnostic=False,\n coarse_pred_each_layer=True,\n conv_cfg=dict(type='Conv1d'),\n norm_cfg=None,\n act_cfg=dict(type='ReLU'),\n loss_point=dict(\n type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),\n init_cfg=dict(\n type='Normal', std=0.001,\n override=dict(name='fc_logits'))):\n super().__init__(init_cfg)\n self.num_fcs = num_fcs\n self.in_channels = in_channels\n self.fc_channels = fc_channels\n self.num_classes = num_classes\n self.class_agnostic = class_agnostic\n self.coarse_pred_each_layer = coarse_pred_each_layer\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.loss_point = build_loss(loss_point)\n\n fc_in_channels = in_channels + num_classes\n self.fcs = nn.ModuleList()\n for _ in range(num_fcs):\n fc = ConvModule(\n fc_in_channels,\n fc_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n self.fcs.append(fc)\n fc_in_channels = fc_channels\n fc_in_channels += num_classes if self.coarse_pred_each_layer else 0\n\n out_channels = 1 if self.class_agnostic else self.num_classes\n self.fc_logits = nn.Conv1d(\n fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, fine_grained_feats, coarse_feats):\n \"\"\"Classify each point base on fine grained and coarse feats.\n\n Args:\n fine_grained_feats (Tensor): Fine grained feature sampled from FPN,\n shape (num_rois, in_channels, num_points).\n coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,\n shape (num_rois, num_classes, num_points).\n\n Returns:\n Tensor: Point classification results,\n shape (num_rois, num_class, num_points).\n \"\"\"\n\n x = torch.cat([fine_grained_feats, coarse_feats], dim=1)\n for fc in self.fcs:\n x = fc(x)\n if self.coarse_pred_each_layer:\n x = torch.cat((x, coarse_feats), dim=1)\n return self.fc_logits(x)\n\n def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,\n cfg):\n \"\"\"Get training targets of MaskPointHead for all images.\n\n Args:\n rois (Tensor): Region of Interest, shape (num_rois, 5).\n rel_roi_points: Points coordinates relative to RoI, shape\n (num_rois, num_points, 2).\n sampling_results (:obj:`SamplingResult`): Sampling result after\n sampling and assignment.\n gt_masks (Tensor) : Ground truth segmentation masks 
of\n corresponding boxes, shape (num_rois, height, width).\n cfg (dict): Training cfg.\n\n Returns:\n Tensor: Point target, shape (num_rois, num_points).\n \"\"\"\n\n num_imgs = len(sampling_results)\n rois_list = []\n rel_roi_points_list = []\n for batch_ind in range(num_imgs):\n inds = (rois[:, 0] == batch_ind)\n rois_list.append(rois[inds])\n rel_roi_points_list.append(rel_roi_points[inds])\n pos_assigned_gt_inds_list = [\n res.pos_assigned_gt_inds for res in sampling_results\n ]\n cfg_list = [cfg for _ in range(num_imgs)]\n\n point_targets = map(self._get_target_single, rois_list,\n rel_roi_points_list, pos_assigned_gt_inds_list,\n gt_masks, cfg_list)\n point_targets = list(point_targets)\n\n if len(point_targets) > 0:\n point_targets = torch.cat(point_targets)\n\n return point_targets\n\n def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,\n gt_masks, cfg):\n \"\"\"Get training target of MaskPointHead for each image.\"\"\"\n num_pos = rois.size(0)\n num_points = cfg.num_points\n if num_pos > 0:\n gt_masks_th = (\n gt_masks.to_tensor(rois.dtype, rois.device).index_select(\n 0, pos_assigned_gt_inds))\n gt_masks_th = gt_masks_th.unsqueeze(1)\n rel_img_points = rel_roi_point_to_rel_img_point(\n rois, rel_roi_points, gt_masks_th.shape[2:])\n point_targets = point_sample(gt_masks_th,\n rel_img_points).squeeze(1)\n else:\n point_targets = rois.new_zeros((0, num_points))\n return point_targets\n\n def loss(self, point_pred, point_targets, labels):\n \"\"\"Calculate loss for MaskPointHead.\n\n Args:\n point_pred (Tensor): Point predication result, shape\n (num_rois, num_classes, num_points).\n point_targets (Tensor): Point targets, shape (num_roi, num_points).\n labels (Tensor): Class label of corresponding boxes,\n shape (num_rois, )\n\n Returns:\n dict[str, Tensor]: a dictionary of point loss components\n \"\"\"\n\n loss = dict()\n if self.class_agnostic:\n loss_point = self.loss_point(point_pred, point_targets,\n torch.zeros_like(labels))\n else:\n loss_point = self.loss_point(point_pred, point_targets, labels)\n loss['loss_point'] = loss_point\n return loss\n\n def _get_uncertainty(self, mask_pred, labels):\n \"\"\"Estimate uncertainty based on pred logits.\n\n We estimate uncertainty as L1 distance between 0.0 and the logits\n prediction in 'mask_pred' for the foreground class in `classes`.\n\n Args:\n mask_pred (Tensor): mask predication logits, shape (num_rois,\n num_classes, mask_height, mask_width).\n\n labels (list[Tensor]): Either predicted or ground truth label for\n each predicted mask, of length num_rois.\n\n Returns:\n scores (Tensor): Uncertainty scores with the most uncertain\n locations having the highest uncertainty score,\n shape (num_rois, 1, mask_height, mask_width)\n \"\"\"\n if mask_pred.shape[1] == 1:\n gt_class_logits = mask_pred.clone()\n else:\n inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)\n gt_class_logits = mask_pred[inds, labels].unsqueeze(1)\n return -torch.abs(gt_class_logits)\n\n def get_roi_rel_points_train(self, mask_pred, labels, cfg):\n \"\"\"Get ``num_points`` most uncertain points with random points during\n train.\n\n Sample points in [0, 1] x [0, 1] coordinate space based on their\n uncertainty. 
The uncertainties are calculated for each point using\n '_get_uncertainty()' function that takes point's logit prediction as\n input.\n\n Args:\n mask_pred (Tensor): A tensor of shape (num_rois, num_classes,\n mask_height, mask_width) for class-specific or class-agnostic\n prediction.\n labels (list): The ground truth class for each instance.\n cfg (dict): Training config of point head.\n\n Returns:\n point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n that contains the coordinates sampled points.\n \"\"\"\n num_points = cfg.num_points\n oversample_ratio = cfg.oversample_ratio\n importance_sample_ratio = cfg.importance_sample_ratio\n assert oversample_ratio >= 1\n assert 0 <= importance_sample_ratio <= 1\n batch_size = mask_pred.shape[0]\n num_sampled = int(num_points * oversample_ratio)\n point_coords = torch.rand(\n batch_size, num_sampled, 2, device=mask_pred.device)\n point_logits = point_sample(mask_pred, point_coords)\n # It is crucial to calculate uncertainty based on the sampled\n # prediction value for the points. Calculating uncertainties of the\n # coarse predictions first and sampling them for points leads to\n # incorrect results. To illustrate this: assume uncertainty func(\n # logits)=-abs(logits), a sampled point between two coarse\n # predictions with -1 and 1 logits has 0 logits, and therefore 0\n # uncertainty value. However, if we calculate uncertainties for the\n # coarse predictions first, both will have -1 uncertainty,\n # and sampled point will get -1 uncertainty.\n point_uncertainties = self._get_uncertainty(point_logits, labels)\n num_uncertain_points = int(importance_sample_ratio * num_points)\n num_random_points = num_points - num_uncertain_points\n idx = torch.topk(\n point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]\n shift = num_sampled * torch.arange(\n batch_size, dtype=torch.long, device=mask_pred.device)\n idx += shift[:, None]\n point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(\n batch_size, num_uncertain_points, 2)\n if num_random_points > 0:\n rand_roi_coords = torch.rand(\n batch_size, num_random_points, 2, device=mask_pred.device)\n point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)\n return point_coords\n\n def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):\n \"\"\"Get ``num_points`` most uncertain points during test.\n\n Args:\n mask_pred (Tensor): A tensor of shape (num_rois, num_classes,\n mask_height, mask_width) for class-specific or class-agnostic\n prediction.\n pred_label (list): The predication class for each instance.\n cfg (dict): Testing config of point head.\n\n Returns:\n point_indices (Tensor): A tensor of shape (num_rois, num_points)\n that contains indices from [0, mask_height x mask_width) of the\n most uncertain points.\n point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n that contains [0, 1] x [0, 1] normalized coordinates of the\n most uncertain points from the [mask_height, mask_width] grid .\n \"\"\"\n num_points = cfg.subdivision_num_points\n uncertainty_map = self._get_uncertainty(mask_pred, pred_label)\n num_rois, _, mask_height, mask_width = uncertainty_map.shape\n h_step = 1.0 / mask_height\n w_step = 1.0 / mask_width\n\n uncertainty_map = uncertainty_map.view(num_rois,\n mask_height * mask_width)\n num_points = min(mask_height * mask_width, num_points)\n point_indices = uncertainty_map.topk(num_points, dim=1)[1]\n point_coords = uncertainty_map.new_zeros(num_rois, num_points, 2)\n point_coords[:, :, 0] = w_step / 2.0 + (point_indices 
%\n mask_width).float() * w_step\n point_coords[:, :, 1] = h_step / 2.0 + (point_indices //\n mask_width).float() * h_step\n return point_indices, point_coords\n", "import argparse\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nfrom .utils import binarize, get_fast_pq, remap_label\n\ntissue_types = [\n \"Adrenal_gland\",\n \"Bile-duct\",\n \"Bladder\",\n \"Breast\",\n \"Cervix\",\n \"Colon\",\n \"Esophagus\",\n \"HeadNeck\",\n \"Kidney\",\n \"Liver\",\n \"Lung\",\n \"Ovarian\",\n \"Pancreatic\",\n \"Prostate\",\n \"Skin\",\n \"Stomach\",\n \"Testis\",\n \"Thyroid\",\n \"Uterus\",\n]\n\n\ndef calculate_pq(true_path, pred_path, save_path):\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n # load the data\n true = np.load(true_path)\n pred = np.load(pred_path)\n\n mPQ_all = []\n bPQ_all = []\n\n # loop over the images\n for i in range(true.shape[0]):\n pq = []\n pred_bin = binarize(pred[i, :, :, :5])\n true_bin = binarize(true[i, :, :, :5])\n\n if len(np.unique(true_bin)) == 1:\n pq_bin = (\n np.nan\n ) # if ground truth is empty for that class, skip from calculation\n else:\n [_, _, pq_bin], _ = get_fast_pq(true_bin, pred_bin) # compute PQ\n\n # loop over the classes\n for j in range(5):\n pred_tmp = pred[i, :, :, j]\n pred_tmp = pred_tmp.astype(\"int32\")\n true_tmp = true[i, :, :, j]\n true_tmp = true_tmp.astype(\"int32\")\n pred_tmp = remap_label(pred_tmp)\n true_tmp = remap_label(true_tmp)\n\n if len(np.unique(true_tmp)) == 1:\n pq_tmp = (\n np.nan\n ) # if ground truth is empty for that class, skip from calculation\n else:\n [_, _, pq_tmp], _ = get_fast_pq(true_tmp, pred_tmp) # compute PQ\n\n pq.append(pq_tmp)\n\n mPQ_all.append(pq)\n bPQ_all.append([pq_bin])\n\n # using np.nanmean skips values with nan from the mean calculation\n mPQ_each_image = [np.nanmean(pq) for pq in mPQ_all]\n bPQ_each_image = [np.nanmean(pq_bin) for pq_bin in bPQ_all]\n\n # class metric\n neo_PQ = np.nanmean([pq[0] for pq in mPQ_all])\n inflam_PQ = np.nanmean([pq[1] for pq in mPQ_all])\n conn_PQ = np.nanmean([pq[2] for pq in mPQ_all])\n dead_PQ = np.nanmean([pq[3] for pq in mPQ_all])\n nonneo_PQ = np.nanmean([pq[4] for pq in mPQ_all])\n\n # Print for each class\n print(\"Printing calculated metrics on a single split\")\n print(\"-\" * 40)\n print(\"Neoplastic PQ: {}\".format(neo_PQ))\n print(\"Inflammatory PQ: {}\".format(inflam_PQ))\n print(\"Connective PQ: {}\".format(conn_PQ))\n print(\"Dead PQ: {}\".format(dead_PQ))\n print(\"Non-Neoplastic PQ: {}\".format(nonneo_PQ))\n print(\"-\" * 40)\n\n # Save per-class metrics as a csv file\n for_dataframe = {\n \"Class Name\": [\"Neoplastic\", \"Inflam\", \"Connective\", \"Dead\", \"Non-Neoplastic\"],\n \"PQ\": [neo_PQ, conn_PQ, conn_PQ, dead_PQ, nonneo_PQ],\n }\n df = pd.DataFrame(for_dataframe, columns=[\"Tissue name\", \"PQ\"])\n df.to_csv(save_path + \"/class_stats.csv\")\n\n # Print for each tissue\n all_tissue_mPQ = []\n all_tissue_bPQ = []\n for tissue_name in [\"breast\"]:\n indices = [i for i, x in enumerate(true)]\n tissue_PQ = [mPQ_each_image[i] for i in indices]\n print(\"{} PQ: {} \".format(tissue_name, np.nanmean(tissue_PQ)))\n tissue_PQ_bin = [bPQ_each_image[i] for i in indices]\n print(\"{} PQ binary: {} \".format(tissue_name, np.nanmean(tissue_PQ_bin)))\n all_tissue_mPQ.append(np.nanmean(tissue_PQ))\n all_tissue_bPQ.append(np.nanmean(tissue_PQ_bin))\n\n for_dataframe = {\n \"Tissue name\": [\"breast\"],\n \"PQ\": all_tissue_mPQ,\n \"PQ bin\": all_tissue_bPQ,\n }\n print(for_dataframe)\n df = pd.DataFrame(for_dataframe, 
columns=[\"Tissue name\", \"PQ\", \"PQ bin\"])\n df.to_csv(save_path + \"/tissue_stats.csv\")\n\n # Show overall metrics - mPQ is average PQ over the classes and the tissues, bPQ is average binary PQ over the tissues\n print(\"-\" * 40)\n print(\"Average mPQ:{}\".format(np.nanmean(all_tissue_mPQ)))\n print(\"Average bPQ:{}\".format(np.nanmean(all_tissue_bPQ)))\n\n return np.nanmean(all_tissue_mPQ), np.nanmean(all_tissue_bPQ)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--true_path\", type=str, required=True)\n parser.add_argument(\"--pred_path\", type=str, required=True)\n parser.add_argument(\"--save_path\", type=str, default=\"results/\")\n args = parser.parse_args()\n calculate_pq(\n args.true_path,\n args.pred_path,\n args.save_path,\n )\n", "# Copyright 2021 solo-learn development team.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\n# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport torch\nfrom solo.losses.vicreg import invariance_loss, variance_loss\nfrom torch import Tensor\nfrom torch.nn import functional as F\n\n\ndef covariance_loss(z1: Tensor, z2: Tensor) -> Tensor:\n \"\"\"Computes normalized covariance loss given batch of projected features z1 from view 1 and\n projected features z2 from view 2.\n\n Args:\n z1 (torch.Tensor): NxD Tensor containing projected features from view 1.\n z2 (torch.Tensor): NxD Tensor containing projected features from view 2.\n\n Returns:\n torch.Tensor: covariance regularization loss.\n \"\"\"\n\n norm_z1 = z1 - z1.mean(dim=0)\n norm_z2 = z2 - z2.mean(dim=0)\n norm_z1 = F.normalize(norm_z1, p=2, dim=0) # (batch * feature); l2-norm\n norm_z2 = F.normalize(norm_z2, p=2, dim=0)\n fxf_cov_z1 = torch.mm(norm_z1.T, norm_z1) # (feature * feature)\n fxf_cov_z2 = torch.mm(norm_z2.T, norm_z2)\n fxf_cov_z1.fill_diagonal_(0.0)\n fxf_cov_z2.fill_diagonal_(0.0)\n cov_loss = (fxf_cov_z1 ** 2).mean() + (fxf_cov_z2 ** 2).mean()\n return cov_loss\n\n\ndef vibcreg_loss_func(\n z1: torch.Tensor,\n z2: torch.Tensor,\n sim_loss_weight: float = 25.0,\n var_loss_weight: float = 25.0,\n cov_loss_weight: float = 200.0,\n) -> torch.Tensor:\n \"\"\"Computes VIbCReg's loss given batch of projected features z1 from view 1 and\n projected features z2 from view 2.\n\n Args:\n z1 (torch.Tensor): NxD Tensor containing projected features from view 1.\n z2 (torch.Tensor): NxD Tensor containing projected features from view 2.\n sim_loss_weight (float): invariance loss weight.\n var_loss_weight (float): variance loss weight.\n cov_loss_weight (float): covariance loss 
weight.\n\n Returns:\n torch.Tensor: VIbCReg loss.\n \"\"\"\n\n sim_loss = invariance_loss(z1, z2)\n var_loss = variance_loss(z1, z2)\n cov_loss = covariance_loss(z1, z2)\n\n loss = (\n sim_loss_weight * sim_loss\n + var_loss_weight * var_loss\n + cov_loss_weight * cov_loss\n )\n return loss\n", "# Copyright 2021 solo-learn development team.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\n# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport argparse\nfrom typing import Any, Dict, List, Sequence\n\nimport torch\nimport torch.nn as nn\n\nfrom solo.losses.simclr import simclr_loss_func\nfrom solo.methods.base import BaseMethod\n\n\nclass SimCLR(BaseMethod):\n def __init__(\n self, proj_output_dim: int, proj_hidden_dim: int, temperature: float, **kwargs\n ):\n \"\"\"Implements SimCLR (https://arxiv.org/abs/2002.05709).\n\n Args:\n proj_output_dim (int): number of dimensions of the projected features.\n proj_hidden_dim (int): number of neurons in the hidden layers of the projector.\n temperature (float): temperature for the softmax in the contrastive loss.\n \"\"\"\n\n super().__init__(**kwargs)\n self.no_labels = kwargs[\"no_labels\"]\n\n self.temperature = temperature\n\n # projector\n self.projector = nn.Sequential(\n nn.Linear(self.features_dim, proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, proj_output_dim),\n )\n\n @staticmethod\n def add_model_specific_args(\n parent_parser: argparse.ArgumentParser,\n ) -> argparse.ArgumentParser:\n parent_parser = super(SimCLR, SimCLR).add_model_specific_args(parent_parser)\n parser = parent_parser.add_argument_group(\"simclr\")\n\n # projector\n parser.add_argument(\"--proj_output_dim\", type=int, default=128)\n parser.add_argument(\"--proj_hidden_dim\", type=int, default=2048)\n\n # parameters\n parser.add_argument(\"--temperature\", type=float, default=0.1)\n\n return parent_parser\n\n @property\n def learnable_params(self) -> List[dict]:\n \"\"\"Adds projector parameters to the parent's learnable parameters.\n\n Returns:\n List[dict]: list of learnable parameters.\n \"\"\"\n\n extra_learnable_params = [{\"params\": self.projector.parameters()}]\n return super().learnable_params + extra_learnable_params\n\n def forward(self, X: torch.tensor, *args, **kwargs) -> Dict[str, Any]:\n \"\"\"Performs the forward pass of the backbone, the projector.\n\n Args:\n X (torch.Tensor): a batch of images in the tensor format.\n\n Returns:\n Dict[str, Any]:\n a dict containing the outputs of the parent\n and the projected features.\n \"\"\"\n\n out = super().forward(X, *args, **kwargs)\n z = 
self.projector(out[\"feats\"])\n return {**out, \"z\": z}\n\n def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:\n \"\"\"Training step for SimCLR reusing BaseMethod training step.\n\n Args:\n batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where\n [X] is a list of size num_crops containing batches of images.\n batch_idx (int): index of the batch.\n\n Returns:\n torch.Tensor: total loss composed of SimCLR loss and classification loss.\n \"\"\"\n\n indexes = batch[0]\n\n out = super().training_step(batch, batch_idx)\n class_loss = out[\"loss\"]\n\n feats = out[\"feats\"]\n\n z = torch.cat([self.projector(f) for f in feats])\n\n # ------- contrastive loss -------\n n_augs = self.num_large_crops + self.num_small_crops\n indexes = indexes.repeat(n_augs)\n\n nce_loss = simclr_loss_func(\n z,\n indexes=indexes,\n temperature=self.temperature,\n )\n\n self.log(\"train_nce_loss\", nce_loss, on_epoch=True, sync_dist=True)\n\n if self.no_labels:\n return nce_loss\n else:\n return nce_loss + class_loss\n", "import copy\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale\n\nfrom mmdet.models.dense_heads.fcos_head import FCOSHead\nfrom ..builder import HEADS\n\n\[email protected]_module()\nclass NASFCOSHead(FCOSHead):\n \"\"\"Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.\n\n It is quite similar with FCOS head, except for the searched structure of\n classification branch and bbox regression branch, where a structure of\n \"dconv3x3, conv3x3, dconv3x3, conv1x1\" is utilized instead.\n \"\"\"\n\n def __init__(self, *args, init_cfg=None, **kwargs):\n if init_cfg is None:\n init_cfg = [\n dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),\n dict(\n type='Normal',\n std=0.01,\n override=[\n dict(name='conv_reg'),\n dict(name='conv_centerness'),\n dict(\n name='conv_cls',\n type='Normal',\n std=0.01,\n bias_prob=0.01)\n ]),\n ]\n super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)\n\n def _init_layers(self):\n \"\"\"Initialize layers of the head.\"\"\"\n dconv3x3_config = dict(\n type='DCNv2',\n kernel_size=3,\n use_bias=True,\n deform_groups=2,\n padding=1)\n conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)\n conv1x1_config = dict(type='Conv', kernel_size=1)\n\n self.arch_config = [\n dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config\n ]\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i, op_ in enumerate(self.arch_config):\n op = copy.deepcopy(op_)\n chn = self.in_channels if i == 0 else self.feat_channels\n assert isinstance(op, dict)\n use_bias = op.pop('use_bias', False)\n padding = op.pop('padding', 0)\n kernel_size = op.pop('kernel_size')\n module = ConvModule(\n chn,\n self.feat_channels,\n kernel_size,\n stride=1,\n padding=padding,\n norm_cfg=self.norm_cfg,\n bias=use_bias,\n conv_cfg=op)\n\n self.cls_convs.append(copy.deepcopy(module))\n self.reg_convs.append(copy.deepcopy(module))\n\n self.conv_cls = nn.Conv2d(\n self.feat_channels, self.cls_out_channels, 3, padding=1)\n self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)\n\n self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n", "import torch\n\nfrom mmdet.utils import util_mixins\n\n\nclass SamplingResult(util_mixins.NiceRepr):\n \"\"\"Bbox sampling result.\n\n Example:\n >>> # xdoctest: +IGNORE_WANT\n >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA\n >>> self 
= SamplingResult.random(rng=10)\n >>> print(f'self = {self}')\n self = <SamplingResult({\n 'neg_bboxes': torch.Size([12, 4]),\n 'neg_inds': tensor([ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12]),\n 'num_gts': 4,\n 'pos_assigned_gt_inds': tensor([], dtype=torch.int64),\n 'pos_bboxes': torch.Size([0, 4]),\n 'pos_inds': tensor([], dtype=torch.int64),\n 'pos_is_gt': tensor([], dtype=torch.uint8)\n })>\n \"\"\"\n\n def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,\n gt_flags):\n self.pos_inds = pos_inds\n self.neg_inds = neg_inds\n self.pos_bboxes = bboxes[pos_inds]\n self.neg_bboxes = bboxes[neg_inds]\n self.pos_is_gt = gt_flags[pos_inds]\n\n self.num_gts = gt_bboxes.shape[0]\n self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n\n if gt_bboxes.numel() == 0:\n # hack for index error case\n assert self.pos_assigned_gt_inds.numel() == 0\n self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)\n else:\n if len(gt_bboxes.shape) < 2:\n gt_bboxes = gt_bboxes.view(-1, 4)\n\n self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]\n\n if assign_result.labels is not None:\n self.pos_gt_labels = assign_result.labels[pos_inds]\n else:\n self.pos_gt_labels = None\n\n @property\n def bboxes(self):\n \"\"\"torch.Tensor: concatenated positive and negative boxes\"\"\"\n return torch.cat([self.pos_bboxes, self.neg_bboxes])\n\n def to(self, device):\n \"\"\"Change the device of the data inplace.\n\n Example:\n >>> self = SamplingResult.random()\n >>> print(f'self = {self.to(None)}')\n >>> # xdoctest: +REQUIRES(--gpu)\n >>> print(f'self = {self.to(0)}')\n \"\"\"\n _dict = self.__dict__\n for key, value in _dict.items():\n if isinstance(value, torch.Tensor):\n _dict[key] = value.to(device)\n return self\n\n def __nice__(self):\n data = self.info.copy()\n data['pos_bboxes'] = data.pop('pos_bboxes').shape\n data['neg_bboxes'] = data.pop('neg_bboxes').shape\n parts = [f\"'{k}': {v!r}\" for k, v in sorted(data.items())]\n body = ' ' + ',\\n '.join(parts)\n return '{\\n' + body + '\\n}'\n\n @property\n def info(self):\n \"\"\"Returns a dictionary of info about the object.\"\"\"\n return {\n 'pos_inds': self.pos_inds,\n 'neg_inds': self.neg_inds,\n 'pos_bboxes': self.pos_bboxes,\n 'neg_bboxes': self.neg_bboxes,\n 'pos_is_gt': self.pos_is_gt,\n 'num_gts': self.num_gts,\n 'pos_assigned_gt_inds': self.pos_assigned_gt_inds,\n }\n\n @classmethod\n def random(cls, rng=None, **kwargs):\n \"\"\"\n Args:\n rng (None | int | numpy.random.RandomState): seed or state.\n kwargs (keyword arguments):\n - num_preds: number of predicted boxes\n - num_gts: number of true boxes\n - p_ignore (float): probability of a predicted box assigned to \\\n an ignored truth.\n - p_assigned (float): probability of a predicted box not being \\\n assigned.\n - p_use_label (float | bool): with labels or not.\n\n Returns:\n :obj:`SamplingResult`: Randomly generated sampling result.\n\n Example:\n >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA\n >>> self = SamplingResult.random()\n >>> print(self.__dict__)\n \"\"\"\n from mmdet.core.bbox.samplers.random_sampler import RandomSampler\n from mmdet.core.bbox.assigners.assign_result import AssignResult\n from mmdet.core.bbox import demodata\n rng = demodata.ensure_rng(rng)\n\n # make probabalistic?\n num = 32\n pos_fraction = 0.5\n neg_pos_ub = -1\n\n assign_result = AssignResult.random(rng=rng, **kwargs)\n\n # Note we could just compute an assignment\n bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)\n gt_bboxes = 
demodata.random_boxes(assign_result.num_gts, rng=rng)\n\n if rng.rand() > 0.2:\n # sometimes algorithms squeeze their data, be robust to that\n gt_bboxes = gt_bboxes.squeeze()\n bboxes = bboxes.squeeze()\n\n if assign_result.labels is None:\n gt_labels = None\n else:\n gt_labels = None # todo\n\n if gt_labels is None:\n add_gt_as_proposals = False\n else:\n add_gt_as_proposals = True # make probabalistic?\n\n sampler = RandomSampler(\n num,\n pos_fraction,\n neg_pos_ub=neg_pos_ub,\n add_gt_as_proposals=add_gt_as_proposals,\n rng=rng)\n self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)\n return self\n" ]
[ [ "torch.LongTensor", "numpy.expand_dims", "torch.from_numpy", "torch.tensor", "numpy.ones", "torch.FloatTensor", "numpy.zeros" ], [ "torch.cat" ], [ "torch.abs", "torch.cat", "torch.nn.ModuleList", "torch.zeros_like", "torch.nn.Conv1d", "torch.rand", "torch.arange", "torch.topk" ], [ "numpy.load", "numpy.nanmean", "pandas.DataFrame", "numpy.unique" ], [ "torch.nn.functional.normalize", "torch.mm" ], [ "torch.nn.Linear", "torch.nn.ReLU" ], [ "torch.nn.ModuleList", "torch.nn.Conv2d" ], [ "torch.empty_like", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shaandesai1/transfer_diffeq
[ "29ab4f3ff16a58bc7b1751428e540a3bb135778c" ]
[ "visualizer.py" ]
[ "\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nfrom neurodiffeq import diff # the differentiation operation\nfrom neurodiffeq.conditions import IVP # the initial condition\nfrom neurodiffeq.networks import FCNN # fully-connect neural network\nfrom neurodiffeq.solvers import Solver1D\nfrom neurodiffeq.callbacks import WeightCallback\nfrom neurodiffeq.callbacks import WeightCallback1, SolutionCallback, SaddleCallback\nfrom neurodiffeq.callbacks import PeriodLocal\nfrom sklearn.metrics import mean_squared_error\n# from sklearn.metrics.pairwise import cosine_similarity\nimport copy\nimport matplotlib.pyplot as plt\n\nDIFFEQS_TRAIN = {\n 'exp': lambda u, t: [diff(u, t) + u],\n 'exp1': lambda u, t: [diff(u, t) - u],\n 'tanh': lambda u, t: [diff(u, t) + u ** 2 - 1],\n 'psig': lambda u, t: [diff(u, t) - 3 * u + u ** 2],\n 'r1': lambda u, t: [diff(u, t) - u + u ** 2 + u ** 3],\n 'r2': lambda u, t: [diff(u, t) + u + u ** 2],\n 'r3': lambda u, t: [diff(u, t) + u ** 2],\n 'r4': lambda u, t: [diff(u, t) - u ** 2],\n 'q1': lambda u, t: [diff(u, t) - u + u ** 2],\n 'q2': lambda u, t: [diff(u, t) - u + u ** 2 - u ** 3],\n 'q3': lambda u, t: [diff(u, t) + u ** 2 + u ** 4],\n 'q4': lambda u, t: [diff(u, t) - u ** 2 - u ** 4],\n 'high_order1': lambda u, t: [diff(u, t) + u - u ** 2 + u ** 3 - u ** 4 + u ** 5],\n 'high_order2': lambda u, t: [diff(u, t) - u + u ** 2 - u ** 3 + u ** 4 - u ** 5],\n 'baseline': lambda u, t: [diff(u,t)]\n}\n\n\nsolsa = np.load('data/q3_train_solution/3000.npy')\nsolsb = np.load('data/baseline_train_solution/3000.npy')\nanalytical =np.load('data/q3_gt_test_solution/3000.npy')\n# pre1 =np.load('data/q2_q2_pretrain_500_solution/500.npy')\n# pre2 =np.load('data/baseline_q2_pretrain_500_solution/500.npy')\n\nplt.figure()\nplt.plot(solsa,label='q2')\nplt.plot(solsb,label='high_order_2')\nplt.plot(analytical,label='analytical_q2')\n# plt.plot(pre1,label='pre_q2_q2')\n# plt.plot(pre2,label='pre_baseline_q2')\nplt.legend()\nplt.show()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "numpy.load", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
talahajeer/scikit-learn
[ "d66b42708a5912039740cd08f747229433e579b5", "d66b42708a5912039740cd08f747229433e579b5", "d66b42708a5912039740cd08f747229433e579b5", "d66b42708a5912039740cd08f747229433e579b5", "d66b42708a5912039740cd08f747229433e579b5", "d66b42708a5912039740cd08f747229433e579b5", "d66b42708a5912039740cd08f747229433e579b5", "d66b42708a5912039740cd08f747229433e579b5", "d66b42708a5912039740cd08f747229433e579b5" ]
[ "sklearn/preprocessing/tests/test_polynomial.py", "sklearn/impute/_base.py", "sklearn/model_selection/tests/test_successive_halving.py", "examples/model_selection/plot_grid_search_stats.py", "sklearn/neural_network/_stochastic_optimizers.py", "examples/ensemble/plot_forest_importances_faces.py", "sklearn/datasets/tests/test_california_housing.py", "sklearn/datasets/tests/test_covtype.py", "sklearn/neighbors/tests/test_dist_metrics.py" ]
[ "import numpy as np\nimport pytest\nfrom scipy import sparse\nfrom scipy.sparse import random as sparse_random\nfrom sklearn.utils._testing import assert_array_almost_equal\n\nfrom numpy.testing import assert_allclose, assert_array_equal\nfrom scipy.interpolate import BSpline\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import (\n KBinsDiscretizer,\n PolynomialFeatures,\n SplineTransformer,\n)\nfrom sklearn.utils.fixes import linspace, sp_version, parse_version\n\n\[email protected](\"est\", (PolynomialFeatures, SplineTransformer))\ndef test_polynomial_and_spline_array_order(est):\n \"\"\"Test that output array has the given order.\"\"\"\n X = np.arange(10).reshape(5, 2)\n\n def is_c_contiguous(a):\n return np.isfortran(a.T)\n\n assert is_c_contiguous(est().fit_transform(X))\n assert is_c_contiguous(est(order=\"C\").fit_transform(X))\n assert np.isfortran(est(order=\"F\").fit_transform(X))\n\n\[email protected](\n \"params, err_msg\",\n [\n ({\"degree\": -1}, \"degree must be a non-negative integer\"),\n ({\"degree\": 2.5}, \"degree must be a non-negative integer\"),\n ({\"degree\": \"string\"}, \"degree must be a non-negative integer\"),\n ({\"n_knots\": 1}, \"n_knots must be a positive integer >= 2.\"),\n ({\"n_knots\": 1}, \"n_knots must be a positive integer >= 2.\"),\n ({\"n_knots\": 2.5}, \"n_knots must be a positive integer >= 2.\"),\n ({\"n_knots\": \"string\"}, \"n_knots must be a positive integer >= 2.\"),\n ({\"knots\": 1}, \"Expected 2D array, got scalar array instead:\"),\n ({\"knots\": [1, 2]}, \"Expected 2D array, got 1D array instead:\"),\n (\n {\"knots\": [[1]]},\n r\"Number of knots, knots.shape\\[0\\], must be >= 2.\",\n ),\n (\n {\"knots\": [[1, 5], [2, 6]]},\n r\"knots.shape\\[1\\] == n_features is violated.\",\n ),\n (\n {\"knots\": [[1], [1], [2]]},\n \"knots must be sorted without duplicates.\",\n ),\n ({\"knots\": [[2], [1]]}, \"knots must be sorted without duplicates.\"),\n (\n {\"extrapolation\": None},\n \"extrapolation must be one of 'error', 'constant', 'linear', \"\n \"'continue' or 'periodic'.\",\n ),\n (\n {\"extrapolation\": 1},\n \"extrapolation must be one of 'error', 'constant', 'linear', \"\n \"'continue' or 'periodic'.\",\n ),\n (\n {\"extrapolation\": \"string\"},\n \"extrapolation must be one of 'error', 'constant', 'linear', \"\n \"'continue' or 'periodic'.\",\n ),\n ({\"include_bias\": None}, \"include_bias must be bool.\"),\n ({\"include_bias\": 1}, \"include_bias must be bool.\"),\n ({\"include_bias\": \"string\"}, \"include_bias must be bool.\"),\n (\n {\"extrapolation\": \"periodic\", \"n_knots\": 3, \"degree\": 3},\n \"Periodic splines require degree < n_knots. Got n_knots=3 and degree=3.\",\n ),\n (\n {\"extrapolation\": \"periodic\", \"knots\": [[0], [1]], \"degree\": 2},\n \"Periodic splines require degree < n_knots. 
Got n_knots=2 and degree=2.\",\n ),\n ],\n)\ndef test_spline_transformer_input_validation(params, err_msg):\n \"\"\"Test that we raise errors for invalid input in SplineTransformer.\"\"\"\n X = [[1], [2]]\n\n with pytest.raises(ValueError, match=err_msg):\n SplineTransformer(**params).fit(X)\n\n\ndef test_spline_transformer_manual_knot_input():\n \"\"\"\n Test that array-like knot positions in SplineTransformer are accepted.\n \"\"\"\n X = np.arange(20).reshape(10, 2)\n knots = [[0.5, 1], [1.5, 2], [5, 10]]\n st1 = SplineTransformer(degree=3, knots=knots, n_knots=None).fit(X)\n knots = np.asarray(knots)\n st2 = SplineTransformer(degree=3, knots=knots, n_knots=None).fit(X)\n for i in range(X.shape[1]):\n assert_allclose(st1.bsplines_[i].t, st2.bsplines_[i].t)\n\n\[email protected](\"extrapolation\", [\"continue\", \"periodic\"])\ndef test_spline_transformer_integer_knots(extrapolation):\n \"\"\"Test that SplineTransformer accepts integer value knot positions.\"\"\"\n X = np.arange(20).reshape(10, 2)\n knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]]\n _ = SplineTransformer(\n degree=3, knots=knots, extrapolation=extrapolation\n ).fit_transform(X)\n\n\ndef test_spline_transformer_feature_names():\n \"\"\"Test that SplineTransformer generates correct features name.\"\"\"\n X = np.arange(20).reshape(10, 2)\n splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X)\n feature_names = splt.get_feature_names()\n assert_array_equal(\n feature_names,\n [\n \"x0_sp_0\",\n \"x0_sp_1\",\n \"x0_sp_2\",\n \"x0_sp_3\",\n \"x0_sp_4\",\n \"x1_sp_0\",\n \"x1_sp_1\",\n \"x1_sp_2\",\n \"x1_sp_3\",\n \"x1_sp_4\",\n ],\n )\n\n splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X)\n feature_names = splt.get_feature_names([\"a\", \"b\"])\n assert_array_equal(\n feature_names,\n [\n \"a_sp_0\",\n \"a_sp_1\",\n \"a_sp_2\",\n \"a_sp_3\",\n \"b_sp_0\",\n \"b_sp_1\",\n \"b_sp_2\",\n \"b_sp_3\",\n ],\n )\n\n\[email protected](\"degree\", range(1, 5))\[email protected](\"n_knots\", range(3, 5))\[email protected](\"knots\", [\"uniform\", \"quantile\"])\[email protected](\"extrapolation\", [\"constant\", \"periodic\"])\ndef test_spline_transformer_unity_decomposition(degree, n_knots, knots, extrapolation):\n \"\"\"Test that B-splines are indeed a decomposition of unity.\n\n Splines basis functions must sum up to 1 per row, if we stay in between\n boundaries.\n \"\"\"\n X = np.linspace(0, 1, 100)[:, None]\n # make the boundaries 0 and 1 part of X_train, for sure.\n X_train = np.r_[[[0]], X[::2, :], [[1]]]\n X_test = X[1::2, :]\n\n if extrapolation == \"periodic\":\n n_knots = n_knots + degree # periodic splines require degree < n_knots\n\n splt = SplineTransformer(\n n_knots=n_knots,\n degree=degree,\n knots=knots,\n include_bias=True,\n extrapolation=extrapolation,\n )\n splt.fit(X_train)\n for X in [X_train, X_test]:\n assert_allclose(np.sum(splt.transform(X), axis=1), 1)\n\n\[email protected]([\"bias\", \"intercept\"], [(True, False), (False, True)])\ndef test_spline_transformer_linear_regression(bias, intercept):\n \"\"\"Test that B-splines fit a sinusodial curve pretty well.\"\"\"\n X = np.linspace(0, 10, 100)[:, None]\n y = np.sin(X[:, 0]) + 2 # +2 to avoid the value 0 in assert_allclose\n pipe = Pipeline(\n steps=[\n (\n \"spline\",\n SplineTransformer(\n n_knots=15,\n degree=3,\n include_bias=bias,\n extrapolation=\"constant\",\n ),\n ),\n (\"ols\", LinearRegression(fit_intercept=intercept)),\n ]\n )\n pipe.fit(X, y)\n assert_allclose(pipe.predict(X), y, rtol=1e-3)\n\n\[email 
protected](\n \"knots, n_knots, degree\",\n [\n (\"uniform\", 5, 3),\n (\"uniform\", 12, 8),\n (\n [[-1.0, 0.0], [0, 1.0], [0.1, 2.0], [0.2, 3.0], [0.3, 4.0], [1, 5.0]],\n None,\n 3,\n ),\n ],\n)\ndef test_spline_transformer_periodicity_of_extrapolation(knots, n_knots, degree):\n \"\"\"Test that the SplineTransformer is periodic for multiple features.\"\"\"\n X_1 = linspace((-1, 0), (1, 5), 10)\n X_2 = linspace((1, 5), (3, 10), 10)\n\n splt = SplineTransformer(\n knots=knots, n_knots=n_knots, degree=degree, extrapolation=\"periodic\"\n )\n splt.fit(X_1)\n\n assert_allclose(splt.transform(X_1), splt.transform(X_2))\n\n\[email protected]([\"bias\", \"intercept\"], [(True, False), (False, True)])\ndef test_spline_transformer_periodic_linear_regression(bias, intercept):\n \"\"\"Test that B-splines fit a periodic curve pretty well.\"\"\"\n # \"+ 3\" to avoid the value 0 in assert_allclose\n def f(x):\n return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3\n\n X = np.linspace(0, 1, 101)[:, None]\n pipe = Pipeline(\n steps=[\n (\n \"spline\",\n SplineTransformer(\n n_knots=20,\n degree=3,\n include_bias=bias,\n extrapolation=\"periodic\",\n ),\n ),\n (\"ols\", LinearRegression(fit_intercept=intercept)),\n ]\n )\n pipe.fit(X, f(X[:, 0]))\n\n # Generate larger array to check periodic extrapolation\n X_ = np.linspace(-1, 2, 301)[:, None]\n predictions = pipe.predict(X_)\n assert_allclose(predictions, f(X_[:, 0]), atol=0.01, rtol=0.01)\n assert_allclose(predictions[0:100], predictions[100:200], rtol=1e-3)\n\n\[email protected](\n sp_version < parse_version(\"1.0.0\"),\n reason=\"Periodic extrapolation not yet implemented for BSpline.\",\n)\ndef test_spline_transformer_periodic_spline_backport():\n \"\"\"Test that the backport of extrapolate=\"periodic\" works correctly\"\"\"\n X = np.linspace(-2, 3.5, 10)[:, None]\n degree = 2\n\n # Use periodic extrapolation backport in SplineTransformer\n transformer = SplineTransformer(\n degree=degree, extrapolation=\"periodic\", knots=[[-1.0], [0.0], [1.0]]\n )\n Xt = transformer.fit_transform(X)\n\n # Use periodic extrapolation in BSpline\n coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])\n spl = BSpline(np.arange(-3, 4), coef, degree, \"periodic\")\n Xspl = spl(X[:, 0])\n assert_allclose(Xt, Xspl)\n\n\ndef test_spline_transformer_periodic_splines_periodicity():\n \"\"\"\n Test if shifted knots result in the same transformation up to permutation.\n \"\"\"\n X = np.linspace(0, 10, 101)[:, None]\n\n transformer_1 = SplineTransformer(\n degree=3,\n extrapolation=\"periodic\",\n knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],\n )\n\n transformer_2 = SplineTransformer(\n degree=3,\n extrapolation=\"periodic\",\n knots=[[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]],\n )\n\n Xt_1 = transformer_1.fit_transform(X)\n Xt_2 = transformer_2.fit_transform(X)\n\n assert_allclose(Xt_1, Xt_2[:, [4, 0, 1, 2, 3]])\n\n\[email protected](\"degree\", [3, 5])\ndef test_spline_transformer_periodic_splines_smoothness(degree):\n \"\"\"Test that spline transformation is smooth at first / last knot.\"\"\"\n X = np.linspace(-2, 10, 10_000)[:, None]\n\n transformer = SplineTransformer(\n degree=degree,\n extrapolation=\"periodic\",\n knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],\n )\n Xt = transformer.fit_transform(X)\n\n delta = (X.max() - X.min()) / len(X)\n tol = 10 * delta\n\n dXt = Xt\n # We expect splines of degree `degree` to be (`degree`-1) times\n # continuously differentiable. I.e. for d = 0, ..., `degree` - 1 the d-th\n # derivative should be continous. 
This is the case if the (d+1)-th\n # numerical derivative is reasonably small (smaller than `tol` in absolute\n # value). We thus compute d-th numeric derivatives for d = 1, ..., `degree`\n # and compare them to `tol`.\n #\n # Note that the 0-th derivative is the function itself, such that we are\n # also checking its continuity.\n for d in range(1, degree + 1):\n # Check continuity of the (d-1)-th derivative\n diff = np.diff(dXt, axis=0)\n assert np.abs(diff).max() < tol\n # Compute d-th numeric derivative\n dXt = diff / delta\n\n # As degree `degree` splines are not `degree` times continously\n # differentiable at the knots, the `degree + 1`-th numeric derivative\n # should have spikes at the knots.\n diff = np.diff(dXt, axis=0)\n assert np.abs(diff).max() > 1\n\n\[email protected]([\"bias\", \"intercept\"], [(True, False), (False, True)])\[email protected](\"degree\", [1, 2, 3, 4, 5])\ndef test_spline_transformer_extrapolation(bias, intercept, degree):\n \"\"\"Test that B-spline extrapolation works correctly.\"\"\"\n # we use a straight line for that\n X = np.linspace(-1, 1, 100)[:, None]\n y = X.squeeze()\n\n # 'constant'\n pipe = Pipeline(\n [\n [\n \"spline\",\n SplineTransformer(\n n_knots=4,\n degree=degree,\n include_bias=bias,\n extrapolation=\"constant\",\n ),\n ],\n [\"ols\", LinearRegression(fit_intercept=intercept)],\n ]\n )\n pipe.fit(X, y)\n assert_allclose(pipe.predict([[-10], [5]]), [-1, 1])\n\n # 'linear'\n pipe = Pipeline(\n [\n [\n \"spline\",\n SplineTransformer(\n n_knots=4,\n degree=degree,\n include_bias=bias,\n extrapolation=\"linear\",\n ),\n ],\n [\"ols\", LinearRegression(fit_intercept=intercept)],\n ]\n )\n pipe.fit(X, y)\n assert_allclose(pipe.predict([[-10], [5]]), [-10, 5])\n\n # 'error'\n splt = SplineTransformer(\n n_knots=4, degree=degree, include_bias=bias, extrapolation=\"error\"\n )\n splt.fit(X)\n with pytest.raises(ValueError):\n splt.transform([[-10]])\n with pytest.raises(ValueError):\n splt.transform([[5]])\n\n\ndef test_spline_transformer_kbindiscretizer():\n \"\"\"Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer.\"\"\"\n rng = np.random.RandomState(97531)\n X = rng.randn(200).reshape(200, 1)\n n_bins = 5\n n_knots = n_bins + 1\n\n splt = SplineTransformer(\n n_knots=n_knots, degree=0, knots=\"quantile\", include_bias=True\n )\n splines = splt.fit_transform(X)\n\n kbd = KBinsDiscretizer(n_bins=n_bins, encode=\"onehot-dense\", strategy=\"quantile\")\n kbins = kbd.fit_transform(X)\n\n # Though they should be exactly equal, we test approximately with high\n # accuracy.\n assert_allclose(splines, kbins, rtol=1e-13)\n\n\[email protected](\"n_knots\", [5, 10])\[email protected](\"include_bias\", [True, False])\[email protected](\"degree\", [3, 5])\ndef test_spline_transformer_n_features_out(n_knots, include_bias, degree):\n \"\"\"Test that transform results in n_features_out_ features.\"\"\"\n splt = SplineTransformer(n_knots=n_knots, degree=degree, include_bias=include_bias)\n X = np.linspace(0, 1, 10)[:, None]\n splt.fit(X)\n\n assert splt.transform(X).shape[1] == splt.n_features_out_\n\n\[email protected](\n \"params, err_msg\",\n [\n ({\"degree\": -1}, \"degree must be a non-negative integer\"),\n ({\"degree\": 2.5}, \"degree must be a non-negative int or tuple\"),\n ({\"degree\": \"12\"}, r\"degree=\\(min_degree, max_degree\\) must\"),\n ({\"degree\": \"string\"}, \"degree must be a non-negative int or tuple\"),\n ({\"degree\": (-1, 2)}, r\"degree=\\(min_degree, max_degree\\) must\"),\n ({\"degree\": (0, 1.5)}, 
r\"degree=\\(min_degree, max_degree\\) must\"),\n ({\"degree\": (3, 2)}, r\"degree=\\(min_degree, max_degree\\) must\"),\n ],\n)\ndef test_polynomial_features_input_validation(params, err_msg):\n \"\"\"Test that we raise errors for invalid input in PolynomialFeatures.\"\"\"\n X = [[1], [2]]\n\n with pytest.raises(ValueError, match=err_msg):\n PolynomialFeatures(**params).fit(X)\n\n\[email protected]()\ndef single_feature_degree3():\n X = np.arange(6)[:, np.newaxis]\n P = np.hstack([np.ones_like(X), X, X ** 2, X ** 3])\n return X, P\n\n\[email protected](\n \"degree, include_bias, interaction_only, indices\",\n [\n (3, True, False, slice(None, None)),\n (3, False, False, slice(1, None)),\n (3, True, True, [0, 1]),\n (3, False, True, [1]),\n ((2, 3), True, False, [0, 2, 3]),\n ((2, 3), False, False, [2, 3]),\n ((2, 3), True, True, [0]),\n ((2, 3), False, True, []),\n ],\n)\[email protected](\n \"sparse_X\",\n [False, sparse.csr_matrix, sparse.csc_matrix],\n)\ndef test_polynomial_features_one_feature(\n single_feature_degree3,\n degree,\n include_bias,\n interaction_only,\n indices,\n sparse_X,\n):\n \"\"\"Test PolynomialFeatures on single feature up to degree 3.\"\"\"\n X, P = single_feature_degree3\n if sparse_X:\n X = sparse_X(X)\n tf = PolynomialFeatures(\n degree=degree, include_bias=include_bias, interaction_only=interaction_only\n ).fit(X)\n out = tf.transform(X)\n if sparse_X:\n out = out.toarray()\n assert_allclose(out, P[:, indices])\n if tf.n_output_features_ > 0:\n assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)\n\n\[email protected]()\ndef two_features_degree3():\n X = np.arange(6).reshape((3, 2))\n x1 = X[:, :1]\n x2 = X[:, 1:]\n P = np.hstack(\n [\n x1 ** 0 * x2 ** 0, # 0\n x1 ** 1 * x2 ** 0, # 1\n x1 ** 0 * x2 ** 1, # 2\n x1 ** 2 * x2 ** 0, # 3\n x1 ** 1 * x2 ** 1, # 4\n x1 ** 0 * x2 ** 2, # 5\n x1 ** 3 * x2 ** 0, # 6\n x1 ** 2 * x2 ** 1, # 7\n x1 ** 1 * x2 ** 2, # 8\n x1 ** 0 * x2 ** 3, # 9\n ]\n )\n return X, P\n\n\[email protected](\n \"degree, include_bias, interaction_only, indices\",\n [\n (2, True, False, slice(0, 6)),\n (2, False, False, slice(1, 6)),\n (2, True, True, [0, 1, 2, 4]),\n (2, False, True, [1, 2, 4]),\n ((2, 2), True, False, [0, 3, 4, 5]),\n ((2, 2), False, False, [3, 4, 5]),\n ((2, 2), True, True, [0, 4]),\n ((2, 2), False, True, [4]),\n (3, True, False, slice(None, None)),\n (3, False, False, slice(1, None)),\n (3, True, True, [0, 1, 2, 4]),\n (3, False, True, [1, 2, 4]),\n ((2, 3), True, False, [0, 3, 4, 5, 6, 7, 8, 9]),\n ((2, 3), False, False, slice(3, None)),\n ((2, 3), True, True, [0, 4]),\n ((2, 3), False, True, [4]),\n ((3, 3), True, False, [0, 6, 7, 8, 9]),\n ((3, 3), False, False, [6, 7, 8, 9]),\n ((3, 3), True, True, [0]),\n ((3, 3), False, True, []), # would need 3 input features\n ],\n)\[email protected](\n \"sparse_X\",\n [False, sparse.csr_matrix, sparse.csc_matrix],\n)\ndef test_polynomial_features_two_features(\n two_features_degree3,\n degree,\n include_bias,\n interaction_only,\n indices,\n sparse_X,\n):\n \"\"\"Test PolynomialFeatures on 2 features up to degree 3.\"\"\"\n X, P = two_features_degree3\n if sparse_X:\n X = sparse_X(X)\n tf = PolynomialFeatures(\n degree=degree, include_bias=include_bias, interaction_only=interaction_only\n ).fit(X)\n out = tf.transform(X)\n if sparse_X:\n out = out.toarray()\n assert_allclose(out, P[:, indices])\n if tf.n_output_features_ > 0:\n assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)\n\n\ndef test_polynomial_feature_names():\n X = 
np.arange(30).reshape(10, 3)\n poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)\n feature_names = poly.get_feature_names()\n assert_array_equal(\n [\"1\", \"x0\", \"x1\", \"x2\", \"x0^2\", \"x0 x1\", \"x0 x2\", \"x1^2\", \"x1 x2\", \"x2^2\"],\n feature_names,\n )\n assert len(feature_names) == poly.transform(X).shape[1]\n\n poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)\n feature_names = poly.get_feature_names([\"a\", \"b\", \"c\"])\n assert_array_equal(\n [\n \"a\",\n \"b\",\n \"c\",\n \"a^2\",\n \"a b\",\n \"a c\",\n \"b^2\",\n \"b c\",\n \"c^2\",\n \"a^3\",\n \"a^2 b\",\n \"a^2 c\",\n \"a b^2\",\n \"a b c\",\n \"a c^2\",\n \"b^3\",\n \"b^2 c\",\n \"b c^2\",\n \"c^3\",\n ],\n feature_names,\n )\n assert len(feature_names) == poly.transform(X).shape[1]\n\n poly = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X)\n feature_names = poly.get_feature_names([\"a\", \"b\", \"c\"])\n assert_array_equal(\n [\n \"a^2\",\n \"a b\",\n \"a c\",\n \"b^2\",\n \"b c\",\n \"c^2\",\n \"a^3\",\n \"a^2 b\",\n \"a^2 c\",\n \"a b^2\",\n \"a b c\",\n \"a c^2\",\n \"b^3\",\n \"b^2 c\",\n \"b c^2\",\n \"c^3\",\n ],\n feature_names,\n )\n assert len(feature_names) == poly.transform(X).shape[1]\n\n poly = PolynomialFeatures(\n degree=(3, 3), include_bias=True, interaction_only=True\n ).fit(X)\n feature_names = poly.get_feature_names([\"a\", \"b\", \"c\"])\n assert_array_equal([\"1\", \"a b c\"], feature_names)\n assert len(feature_names) == poly.transform(X).shape[1]\n\n # test some unicode\n poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)\n feature_names = poly.get_feature_names([\"\\u0001F40D\", \"\\u262E\", \"\\u05D0\"])\n assert_array_equal([\"1\", \"\\u0001F40D\", \"\\u262E\", \"\\u05D0\"], feature_names)\n\n\[email protected](\n [\"deg\", \"include_bias\", \"interaction_only\", \"dtype\"],\n [\n (1, True, False, int),\n (2, True, False, int),\n (2, True, False, np.float32),\n (2, True, False, np.float64),\n (3, False, False, np.float64),\n (3, False, True, np.float64),\n (4, False, False, np.float64),\n (4, False, True, np.float64),\n ],\n)\ndef test_polynomial_features_csc_X(deg, include_bias, interaction_only, dtype):\n rng = np.random.RandomState(0)\n X = rng.randint(0, 2, (100, 2))\n X_csc = sparse.csc_matrix(X)\n\n est = PolynomialFeatures(\n deg, include_bias=include_bias, interaction_only=interaction_only\n )\n Xt_csc = est.fit_transform(X_csc.astype(dtype))\n Xt_dense = est.fit_transform(X.astype(dtype))\n\n assert isinstance(Xt_csc, sparse.csc_matrix)\n assert Xt_csc.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csc.A, Xt_dense)\n\n\[email protected](\n [\"deg\", \"include_bias\", \"interaction_only\", \"dtype\"],\n [\n (1, True, False, int),\n (2, True, False, int),\n (2, True, False, np.float32),\n (2, True, False, np.float64),\n (3, False, False, np.float64),\n (3, False, True, np.float64),\n ],\n)\ndef test_polynomial_features_csr_X(deg, include_bias, interaction_only, dtype):\n rng = np.random.RandomState(0)\n X = rng.randint(0, 2, (100, 2))\n X_csr = sparse.csr_matrix(X)\n\n est = PolynomialFeatures(\n deg, include_bias=include_bias, interaction_only=interaction_only\n )\n Xt_csr = est.fit_transform(X_csr.astype(dtype))\n Xt_dense = est.fit_transform(X.astype(dtype, copy=False))\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\[email protected](\"n_features\", [1, 4, 5])\[email protected](\n \"min_degree, max_degree\", [(0, 1), (0, 2), (1, 
3), (0, 4), (3, 4)]\n)\[email protected](\"interaction_only\", [True, False])\[email protected](\"include_bias\", [True, False])\ndef test_num_combinations(\n n_features,\n min_degree,\n max_degree,\n interaction_only,\n include_bias,\n):\n \"\"\"\n Test that n_output_features_ is calculated correctly.\n \"\"\"\n x = sparse.csr_matrix(([1], ([0], [n_features - 1])))\n est = PolynomialFeatures(\n degree=max_degree,\n interaction_only=interaction_only,\n include_bias=include_bias,\n )\n est.fit(x)\n num_combos = est.n_output_features_\n\n combos = PolynomialFeatures._combinations(\n n_features=n_features,\n min_degree=0,\n max_degree=max_degree,\n interaction_only=interaction_only,\n include_bias=include_bias,\n )\n assert num_combos == sum([1 for _ in combos])\n\n\[email protected](\n [\"deg\", \"include_bias\", \"interaction_only\", \"dtype\"],\n [\n (2, True, False, np.float32),\n (2, True, False, np.float64),\n (3, False, False, np.float64),\n (3, False, True, np.float64),\n ],\n)\ndef test_polynomial_features_csr_X_floats(deg, include_bias, interaction_only, dtype):\n X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()\n X = X_csr.toarray()\n\n est = PolynomialFeatures(\n deg, include_bias=include_bias, interaction_only=interaction_only\n )\n Xt_csr = est.fit_transform(X_csr.astype(dtype))\n Xt_dense = est.fit_transform(X.astype(dtype))\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\[email protected](\n [\"zero_row_index\", \"deg\", \"interaction_only\"],\n [\n (0, 2, True),\n (1, 2, True),\n (2, 2, True),\n (0, 3, True),\n (1, 3, True),\n (2, 3, True),\n (0, 2, False),\n (1, 2, False),\n (2, 2, False),\n (0, 3, False),\n (1, 3, False),\n (2, 3, False),\n ],\n)\ndef test_polynomial_features_csr_X_zero_row(zero_row_index, deg, interaction_only):\n X_csr = sparse_random(3, 10, 1.0, random_state=0).tocsr()\n X_csr[zero_row_index, :] = 0.0\n X = X_csr.toarray()\n\n est = PolynomialFeatures(deg, include_bias=False, interaction_only=interaction_only)\n Xt_csr = est.fit_transform(X_csr)\n Xt_dense = est.fit_transform(X)\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\n# This degree should always be one more than the highest degree supported by\n# _csr_expansion.\[email protected](\n [\"include_bias\", \"interaction_only\"],\n [(True, True), (True, False), (False, True), (False, False)],\n)\ndef test_polynomial_features_csr_X_degree_4(include_bias, interaction_only):\n X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()\n X = X_csr.toarray()\n\n est = PolynomialFeatures(\n 4, include_bias=include_bias, interaction_only=interaction_only\n )\n Xt_csr = est.fit_transform(X_csr)\n Xt_dense = est.fit_transform(X)\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\[email protected](\n [\"deg\", \"dim\", \"interaction_only\"],\n [\n (2, 1, True),\n (2, 2, True),\n (3, 1, True),\n (3, 2, True),\n (3, 3, True),\n (2, 1, False),\n (2, 2, False),\n (3, 1, False),\n (3, 2, False),\n (3, 3, False),\n ],\n)\ndef test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only):\n X_csr = sparse_random(1000, dim, 0.5, random_state=0).tocsr()\n X = X_csr.toarray()\n\n est = PolynomialFeatures(deg, interaction_only=interaction_only)\n Xt_csr = est.fit_transform(X_csr)\n Xt_dense = est.fit_transform(X)\n\n assert 
isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\ndef test_polynomial_features_deprecated_n_input_features():\n # check that we raise a deprecation warning when accessing\n # `n_input_features_`. FIXME: remove in 1.2\n depr_msg = (\n \"The attribute `n_input_features_` was deprecated in version \"\n \"1.0 and will be removed in 1.2.\"\n )\n X = np.arange(10).reshape(5, 2)\n\n with pytest.warns(FutureWarning, match=depr_msg):\n PolynomialFeatures().fit(X).n_input_features_\n", "# Authors: Nicolas Tresegnie <[email protected]>\n# Sergey Feldman <[email protected]>\n# License: BSD 3 clause\n\nimport numbers\nimport warnings\nfrom collections import Counter\n\nimport numpy as np\nimport numpy.ma as ma\nfrom scipy import sparse as sp\nfrom scipy import stats\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..utils.sparsefuncs import _get_median\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import FLOAT_DTYPES\nfrom ..utils._mask import _get_mask\nfrom ..utils import is_scalar_nan\n\n\ndef _check_inputs_dtype(X, missing_values):\n if X.dtype.kind in (\"f\", \"i\", \"u\") and not isinstance(missing_values, numbers.Real):\n raise ValueError(\n \"'X' and 'missing_values' types are expected to be\"\n \" both numerical. Got X.dtype={} and \"\n \" type(missing_values)={}.\".format(X.dtype, type(missing_values))\n )\n\n\ndef _most_frequent(array, extra_value, n_repeat):\n \"\"\"Compute the most frequent value in a 1d array extended with\n [extra_value] * n_repeat, where extra_value is assumed to be not part\n of the array.\"\"\"\n # Compute the most frequent value in array only\n if array.size > 0:\n if array.dtype == object:\n # scipy.stats.mode is slow with object dtype array.\n # Python Counter is more efficient\n counter = Counter(array)\n most_frequent_count = counter.most_common(1)[0][1]\n # tie breaking similarly to scipy.stats.mode\n most_frequent_value = min(\n value\n for value, count in counter.items()\n if count == most_frequent_count\n )\n else:\n mode = stats.mode(array)\n most_frequent_value = mode[0][0]\n most_frequent_count = mode[1][0]\n else:\n most_frequent_value = 0\n most_frequent_count = 0\n\n # Compare to array + [extra_value] * n_repeat\n if most_frequent_count == 0 and n_repeat == 0:\n return np.nan\n elif most_frequent_count < n_repeat:\n return extra_value\n elif most_frequent_count > n_repeat:\n return most_frequent_value\n elif most_frequent_count == n_repeat:\n # tie breaking similarly to scipy.stats.mode\n return min(most_frequent_value, extra_value)\n\n\nclass _BaseImputer(TransformerMixin, BaseEstimator):\n \"\"\"Base class for all imputers.\n\n It adds automatically support for `add_indicator`.\n \"\"\"\n\n def __init__(self, *, missing_values=np.nan, add_indicator=False):\n self.missing_values = missing_values\n self.add_indicator = add_indicator\n\n def _fit_indicator(self, X):\n \"\"\"Fit a MissingIndicator.\"\"\"\n if self.add_indicator:\n self.indicator_ = MissingIndicator(\n missing_values=self.missing_values, error_on_new=False\n )\n self.indicator_._fit(X, precomputed=True)\n else:\n self.indicator_ = None\n\n def _transform_indicator(self, X):\n \"\"\"Compute the indicator mask.'\n\n Note that X must be the original data as passed to the imputer before\n any imputation, since imputation may be done inplace in some cases.\n \"\"\"\n if self.add_indicator:\n if not hasattr(self, \"indicator_\"):\n raise ValueError(\n \"Make sure to call 
_fit_indicator before _transform_indicator\"\n )\n return self.indicator_.transform(X)\n\n def _concatenate_indicator(self, X_imputed, X_indicator):\n \"\"\"Concatenate indicator mask with the imputed data.\"\"\"\n if not self.add_indicator:\n return X_imputed\n\n hstack = sp.hstack if sp.issparse(X_imputed) else np.hstack\n if X_indicator is None:\n raise ValueError(\n \"Data from the missing indicator are not provided. Call \"\n \"_fit_indicator and _transform_indicator in the imputer \"\n \"implementation.\"\n )\n\n return hstack((X_imputed, X_indicator))\n\n def _more_tags(self):\n return {\"allow_nan\": is_scalar_nan(self.missing_values)}\n\n\nclass SimpleImputer(_BaseImputer):\n \"\"\"Imputation transformer for completing missing values.\n\n Read more in the :ref:`User Guide <impute>`.\n\n .. versionadded:: 0.20\n `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`\n estimator which is now removed.\n\n Parameters\n ----------\n missing_values : int, float, str, np.nan or None, default=np.nan\n The placeholder for the missing values. All occurrences of\n `missing_values` will be imputed. For pandas' dataframes with\n nullable integer dtypes with missing values, `missing_values`\n should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.\n\n strategy : string, default='mean'\n The imputation strategy.\n\n - If \"mean\", then replace missing values using the mean along\n each column. Can only be used with numeric data.\n - If \"median\", then replace missing values using the median along\n each column. Can only be used with numeric data.\n - If \"most_frequent\", then replace missing using the most frequent\n value along each column. Can be used with strings or numeric data.\n If there is more than one such value, only the smallest is returned.\n - If \"constant\", then replace missing values with fill_value. Can be\n used with strings or numeric data.\n\n .. versionadded:: 0.20\n strategy=\"constant\" for fixed value imputation.\n\n fill_value : string or numerical value, default=None\n When strategy == \"constant\", fill_value is used to replace all\n occurrences of missing_values.\n If left to the default, fill_value will be 0 when imputing numerical\n data and \"missing_value\" for strings or object data types.\n\n verbose : integer, default=0\n Controls the verbosity of the imputer.\n\n copy : boolean, default=True\n If True, a copy of X will be created. If False, imputation will\n be done in-place whenever possible. Note that, in the following cases,\n a new copy will always be made, even if `copy=False`:\n\n - If X is not an array of floating values;\n - If X is encoded as a CSR matrix;\n - If add_indicator=True.\n\n add_indicator : boolean, default=False\n If True, a :class:`MissingIndicator` transform will stack onto output\n of the imputer's transform. This allows a predictive estimator\n to account for missingness despite imputation. 
If a feature has no\n missing values at fit/train time, the feature won't appear on\n the missing indicator even if there are missing values at\n transform/test time.\n\n Attributes\n ----------\n statistics_ : array of shape (n_features,)\n The imputation fill value for each feature.\n Computing statistics can result in `np.nan` values.\n During :meth:`transform`, features corresponding to `np.nan`\n statistics will be discarded.\n\n indicator_ : :class:`~sklearn.impute.MissingIndicator`\n Indicator used to add binary indicators for missing values.\n ``None`` if add_indicator is False.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n See Also\n --------\n IterativeImputer : Multivariate imputation of missing values.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.impute import SimpleImputer\n >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')\n >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])\n SimpleImputer()\n >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]\n >>> print(imp_mean.transform(X))\n [[ 7. 2. 3. ]\n [ 4. 3.5 6. ]\n [10. 3.5 9. ]]\n\n Notes\n -----\n Columns which only contained missing values at :meth:`fit` are discarded\n upon :meth:`transform` if strategy is not \"constant\".\n\n \"\"\"\n\n def __init__(\n self,\n *,\n missing_values=np.nan,\n strategy=\"mean\",\n fill_value=None,\n verbose=0,\n copy=True,\n add_indicator=False,\n ):\n super().__init__(missing_values=missing_values, add_indicator=add_indicator)\n self.strategy = strategy\n self.fill_value = fill_value\n self.verbose = verbose\n self.copy = copy\n\n def _validate_input(self, X, in_fit):\n allowed_strategies = [\"mean\", \"median\", \"most_frequent\", \"constant\"]\n if self.strategy not in allowed_strategies:\n raise ValueError(\n \"Can only use these strategies: {0} got strategy={1}\".format(\n allowed_strategies, self.strategy\n )\n )\n\n if self.strategy in (\"most_frequent\", \"constant\"):\n # If input is a list of strings, dtype = object.\n # Otherwise ValueError is raised in SimpleImputer\n # with strategy='most_frequent' or 'constant'\n # because the list is converted to Unicode numpy array\n if isinstance(X, list) and any(\n isinstance(elem, str) for row in X for elem in row\n ):\n dtype = object\n else:\n dtype = None\n else:\n dtype = FLOAT_DTYPES\n\n if not is_scalar_nan(self.missing_values):\n force_all_finite = True\n else:\n force_all_finite = \"allow-nan\"\n\n try:\n X = self._validate_data(\n X,\n reset=in_fit,\n accept_sparse=\"csc\",\n dtype=dtype,\n force_all_finite=force_all_finite,\n copy=self.copy,\n )\n except ValueError as ve:\n if \"could not convert\" in str(ve):\n new_ve = ValueError(\n \"Cannot use {} strategy with non-numeric data:\\n{}\".format(\n self.strategy, ve\n )\n )\n raise new_ve from None\n else:\n raise ve\n\n _check_inputs_dtype(X, self.missing_values)\n if X.dtype.kind not in (\"i\", \"u\", \"f\", \"O\"):\n raise ValueError(\n \"SimpleImputer does not support data with dtype \"\n \"{0}. 
Please provide either a numeric array (with\"\n \" a floating point or integer dtype) or \"\n \"categorical data represented either as an array \"\n \"with integer dtype or an array of string values \"\n \"with an object dtype.\".format(X.dtype)\n )\n\n return X\n\n def fit(self, X, y=None):\n \"\"\"Fit the imputer on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n Returns\n -------\n self : SimpleImputer\n \"\"\"\n X = self._validate_input(X, in_fit=True)\n\n # default fill_value is 0 for numerical input and \"missing_value\"\n # otherwise\n if self.fill_value is None:\n if X.dtype.kind in (\"i\", \"u\", \"f\"):\n fill_value = 0\n else:\n fill_value = \"missing_value\"\n else:\n fill_value = self.fill_value\n\n # fill_value should be numerical in case of numerical input\n if (\n self.strategy == \"constant\"\n and X.dtype.kind in (\"i\", \"u\", \"f\")\n and not isinstance(fill_value, numbers.Real)\n ):\n raise ValueError(\n \"'fill_value'={0} is invalid. Expected a \"\n \"numerical value when imputing numerical \"\n \"data\".format(fill_value)\n )\n\n if sp.issparse(X):\n # missing_values = 0 not allowed with sparse data as it would\n # force densification\n if self.missing_values == 0:\n raise ValueError(\n \"Imputation not possible when missing_values \"\n \"== 0 and input is sparse. Provide a dense \"\n \"array instead.\"\n )\n else:\n self.statistics_ = self._sparse_fit(\n X, self.strategy, self.missing_values, fill_value\n )\n\n else:\n self.statistics_ = self._dense_fit(\n X, self.strategy, self.missing_values, fill_value\n )\n\n return self\n\n def _sparse_fit(self, X, strategy, missing_values, fill_value):\n \"\"\"Fit the transformer on sparse data.\"\"\"\n missing_mask = _get_mask(X, missing_values)\n mask_data = missing_mask.data\n n_implicit_zeros = X.shape[0] - np.diff(X.indptr)\n\n statistics = np.empty(X.shape[1])\n\n if strategy == \"constant\":\n # for constant strategy, self.statistcs_ is used to store\n # fill_value in each column\n statistics.fill(fill_value)\n else:\n for i in range(X.shape[1]):\n column = X.data[X.indptr[i] : X.indptr[i + 1]]\n mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]\n column = column[~mask_column]\n\n # combine explicit and implicit zeros\n mask_zeros = _get_mask(column, 0)\n column = column[~mask_zeros]\n n_explicit_zeros = mask_zeros.sum()\n n_zeros = n_implicit_zeros[i] + n_explicit_zeros\n\n if strategy == \"mean\":\n s = column.size + n_zeros\n statistics[i] = np.nan if s == 0 else column.sum() / s\n\n elif strategy == \"median\":\n statistics[i] = _get_median(column, n_zeros)\n\n elif strategy == \"most_frequent\":\n statistics[i] = _most_frequent(column, 0, n_zeros)\n super()._fit_indicator(missing_mask)\n\n return statistics\n\n def _dense_fit(self, X, strategy, missing_values, fill_value):\n \"\"\"Fit the transformer on dense data.\"\"\"\n missing_mask = _get_mask(X, missing_values)\n masked_X = ma.masked_array(X, mask=missing_mask)\n\n super()._fit_indicator(missing_mask)\n\n # Mean\n if strategy == \"mean\":\n mean_masked = np.ma.mean(masked_X, axis=0)\n # Avoid the warning \"Warning: converting a masked element to nan.\"\n mean = np.ma.getdata(mean_masked)\n mean[np.ma.getmask(mean_masked)] = np.nan\n\n return mean\n\n # Median\n elif strategy == \"median\":\n median_masked = np.ma.median(masked_X, axis=0)\n # Avoid the warning \"Warning: converting a masked element 
to nan.\"\n median = np.ma.getdata(median_masked)\n median[np.ma.getmaskarray(median_masked)] = np.nan\n\n return median\n\n # Most frequent\n elif strategy == \"most_frequent\":\n # Avoid use of scipy.stats.mstats.mode due to the required\n # additional overhead and slow benchmarking performance.\n # See Issue 14325 and PR 14399 for full discussion.\n\n # To be able access the elements by columns\n X = X.transpose()\n mask = missing_mask.transpose()\n\n if X.dtype.kind == \"O\":\n most_frequent = np.empty(X.shape[0], dtype=object)\n else:\n most_frequent = np.empty(X.shape[0])\n\n for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):\n row_mask = np.logical_not(row_mask).astype(bool)\n row = row[row_mask]\n most_frequent[i] = _most_frequent(row, np.nan, 0)\n\n return most_frequent\n\n # Constant\n elif strategy == \"constant\":\n # for constant strategy, self.statistcs_ is used to store\n # fill_value in each column\n return np.full(X.shape[1], fill_value, dtype=X.dtype)\n\n def transform(self, X):\n \"\"\"Impute all missing values in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n X_imputed : {ndarray, sparse matrix} of shape \\\n (n_samples, n_features_out)\n `X` with imputed values.\n \"\"\"\n check_is_fitted(self)\n\n X = self._validate_input(X, in_fit=False)\n statistics = self.statistics_\n\n if X.shape[1] != statistics.shape[0]:\n raise ValueError(\n \"X has %d features per sample, expected %d\"\n % (X.shape[1], self.statistics_.shape[0])\n )\n\n # compute mask before eliminating invalid features\n missing_mask = _get_mask(X, self.missing_values)\n\n # Delete the invalid columns if strategy is not constant\n if self.strategy == \"constant\":\n valid_statistics = statistics\n valid_statistics_indexes = None\n else:\n # same as np.isnan but also works for object dtypes\n invalid_mask = _get_mask(statistics, np.nan)\n valid_mask = np.logical_not(invalid_mask)\n valid_statistics = statistics[valid_mask]\n valid_statistics_indexes = np.flatnonzero(valid_mask)\n\n if invalid_mask.any():\n missing = np.arange(X.shape[1])[invalid_mask]\n if self.verbose:\n warnings.warn(\n \"Deleting features without observed values: %s\" % missing\n )\n X = X[:, valid_statistics_indexes]\n\n # Do actual imputation\n if sp.issparse(X):\n if self.missing_values == 0:\n raise ValueError(\n \"Imputation not possible when missing_values \"\n \"== 0 and input is sparse. 
Provide a dense \"\n \"array instead.\"\n )\n else:\n # if no invalid statistics are found, use the mask computed\n # before, else recompute mask\n if valid_statistics_indexes is None:\n mask = missing_mask.data\n else:\n mask = _get_mask(X.data, self.missing_values)\n indexes = np.repeat(\n np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)\n )[mask]\n\n X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)\n else:\n # use mask computed before eliminating invalid mask\n if valid_statistics_indexes is None:\n mask_valid_features = missing_mask\n else:\n mask_valid_features = missing_mask[:, valid_statistics_indexes]\n n_missing = np.sum(mask_valid_features, axis=0)\n values = np.repeat(valid_statistics, n_missing)\n coordinates = np.where(mask_valid_features.transpose())[::-1]\n\n X[coordinates] = values\n\n X_indicator = super()._transform_indicator(missing_mask)\n\n return super()._concatenate_indicator(X, X_indicator)\n\n def inverse_transform(self, X):\n \"\"\"Convert the data back to the original representation.\n\n Inverts the `transform` operation performed on an array.\n This operation can only be performed after :class:`SimpleImputer` is\n instantiated with `add_indicator=True`.\n\n Note that ``inverse_transform`` can only invert the transform in\n features that have binary indicators for missing values. If a feature\n has no missing values at ``fit`` time, the feature won't have a binary\n indicator, and the imputation done at ``transform`` time won't be\n inverted.\n\n .. versionadded:: 0.24\n\n Parameters\n ----------\n X : array-like of shape \\\n (n_samples, n_features + n_features_missing_indicator)\n The imputed data to be reverted to original data. It has to be\n an augmented array of imputed data and the missing indicator mask.\n\n Returns\n -------\n X_original : ndarray of shape (n_samples, n_features)\n The original X with missing values as it was prior\n to imputation.\n \"\"\"\n check_is_fitted(self)\n\n if not self.add_indicator:\n raise ValueError(\n \"'inverse_transform' works only when \"\n \"'SimpleImputer' is instantiated with \"\n \"'add_indicator=True'. \"\n f\"Got 'add_indicator={self.add_indicator}' \"\n \"instead.\"\n )\n\n n_features_missing = len(self.indicator_.features_)\n non_empty_feature_count = X.shape[1] - n_features_missing\n array_imputed = X[:, :non_empty_feature_count].copy()\n missing_mask = X[:, non_empty_feature_count:].astype(bool)\n\n n_features_original = len(self.statistics_)\n shape_original = (X.shape[0], n_features_original)\n X_original = np.zeros(shape_original)\n X_original[:, self.indicator_.features_] = missing_mask\n full_mask = X_original.astype(bool)\n\n imputed_idx, original_idx = 0, 0\n while imputed_idx < len(array_imputed.T):\n if not np.all(X_original[:, original_idx]):\n X_original[:, original_idx] = array_imputed.T[imputed_idx]\n imputed_idx += 1\n original_idx += 1\n else:\n original_idx += 1\n\n X_original[full_mask] = self.missing_values\n return X_original\n\n\nclass MissingIndicator(TransformerMixin, BaseEstimator):\n \"\"\"Binary indicators for missing values.\n\n Note that this component typically should not be used in a vanilla\n :class:`Pipeline` consisting of transformers and a classifier, but rather\n could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`.\n\n Read more in the :ref:`User Guide <impute>`.\n\n .. 
versionadded:: 0.20\n\n Parameters\n ----------\n missing_values : int, float, string, np.nan or None, default=np.nan\n The placeholder for the missing values. All occurrences of\n `missing_values` will be imputed. For pandas' dataframes with\n nullable integer dtypes with missing values, `missing_values`\n should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.\n\n features : {'missing-only', 'all'}, default='missing-only'\n Whether the imputer mask should represent all or a subset of\n features.\n\n - If 'missing-only' (default), the imputer mask will only represent\n features containing missing values during fit time.\n - If 'all', the imputer mask will represent all features.\n\n sparse : bool or 'auto', default='auto'\n Whether the imputer mask format should be sparse or dense.\n\n - If 'auto' (default), the imputer mask will be of same type as\n input.\n - If True, the imputer mask will be a sparse matrix.\n - If False, the imputer mask will be a numpy array.\n\n error_on_new : bool, default=True\n If True, transform will raise an error when there are features with\n missing values in transform that have no missing values in fit. This is\n applicable only when `features='missing-only'`.\n\n Attributes\n ----------\n features_ : ndarray, shape (n_missing_features,) or (n_features,)\n The features indices which will be returned when calling ``transform``.\n They are computed during ``fit``. For ``features='all'``, it is\n to ``range(n_features)``.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.impute import MissingIndicator\n >>> X1 = np.array([[np.nan, 1, 3],\n ... [4, 0, np.nan],\n ... [8, 1, 0]])\n >>> X2 = np.array([[5, 1, np.nan],\n ... [np.nan, 2, 3],\n ... [2, 4, 0]])\n >>> indicator = MissingIndicator()\n >>> indicator.fit(X1)\n MissingIndicator()\n >>> X2_tr = indicator.transform(X2)\n >>> X2_tr\n array([[False, True],\n [ True, False],\n [False, False]])\n\n \"\"\"\n\n def __init__(\n self,\n *,\n missing_values=np.nan,\n features=\"missing-only\",\n sparse=\"auto\",\n error_on_new=True,\n ):\n self.missing_values = missing_values\n self.features = features\n self.sparse = sparse\n self.error_on_new = error_on_new\n\n def _get_missing_features_info(self, X):\n \"\"\"Compute the imputer mask and the indices of the features\n containing missing values.\n\n Parameters\n ----------\n X : {ndarray or sparse matrix}, shape (n_samples, n_features)\n The input data with missing values. 
Note that ``X`` has been\n checked in ``fit`` and ``transform`` before to call this function.\n\n Returns\n -------\n imputer_mask : {ndarray or sparse matrix}, shape \\\n (n_samples, n_features)\n The imputer mask of the original data.\n\n features_with_missing : ndarray, shape (n_features_with_missing)\n The features containing missing values.\n\n \"\"\"\n if not self._precomputed:\n imputer_mask = _get_mask(X, self.missing_values)\n else:\n imputer_mask = X\n\n if sp.issparse(X):\n imputer_mask.eliminate_zeros()\n\n if self.features == \"missing-only\":\n n_missing = imputer_mask.getnnz(axis=0)\n\n if self.sparse is False:\n imputer_mask = imputer_mask.toarray()\n elif imputer_mask.format == \"csr\":\n imputer_mask = imputer_mask.tocsc()\n else:\n if not self._precomputed:\n imputer_mask = _get_mask(X, self.missing_values)\n else:\n imputer_mask = X\n\n if self.features == \"missing-only\":\n n_missing = imputer_mask.sum(axis=0)\n\n if self.sparse is True:\n imputer_mask = sp.csc_matrix(imputer_mask)\n\n if self.features == \"all\":\n features_indices = np.arange(X.shape[1])\n else:\n features_indices = np.flatnonzero(n_missing)\n\n return imputer_mask, features_indices\n\n def _validate_input(self, X, in_fit):\n if not is_scalar_nan(self.missing_values):\n force_all_finite = True\n else:\n force_all_finite = \"allow-nan\"\n X = self._validate_data(\n X,\n reset=in_fit,\n accept_sparse=(\"csc\", \"csr\"),\n dtype=None,\n force_all_finite=force_all_finite,\n )\n _check_inputs_dtype(X, self.missing_values)\n if X.dtype.kind not in (\"i\", \"u\", \"f\", \"O\"):\n raise ValueError(\n \"MissingIndicator does not support data with \"\n \"dtype {0}. Please provide either a numeric array\"\n \" (with a floating point or integer dtype) or \"\n \"categorical data represented either as an array \"\n \"with integer dtype or an array of string values \"\n \"with an object dtype.\".format(X.dtype)\n )\n\n if sp.issparse(X) and self.missing_values == 0:\n # missing_values = 0 not allowed with sparse data as it would\n # force densification\n raise ValueError(\n \"Sparse input with missing_values=0 is \"\n \"not supported. Provide a dense \"\n \"array instead.\"\n )\n\n return X\n\n def _fit(self, X, y=None, precomputed=False):\n \"\"\"Fit the transformer on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n If `precomputed` is True, then `X` is a mask of the\n input data.\n\n precomputed : bool\n Whether the input data is a mask.\n\n Returns\n -------\n imputer_mask : {ndarray or sparse matrix}, shape (n_samples, \\\n n_features)\n The imputer mask of the original data.\n\n \"\"\"\n if precomputed:\n if not (hasattr(X, \"dtype\") and X.dtype.kind == \"b\"):\n raise ValueError(\"precomputed is True but the input data is not a mask\")\n self._precomputed = True\n else:\n self._precomputed = False\n\n # Need not validate X again as it would have already been validated\n # in the Imputer calling MissingIndicator\n if not self._precomputed:\n X = self._validate_input(X, in_fit=True)\n\n self._n_features = X.shape[1]\n\n if self.features not in (\"missing-only\", \"all\"):\n raise ValueError(\n \"'features' has to be either 'missing-only' or \"\n \"'all'. 
Got {} instead.\".format(self.features)\n )\n\n if not (\n (isinstance(self.sparse, str) and self.sparse == \"auto\")\n or isinstance(self.sparse, bool)\n ):\n raise ValueError(\n \"'sparse' has to be a boolean or 'auto'. Got {!r} instead.\".format(\n self.sparse\n )\n )\n\n missing_features_info = self._get_missing_features_info(X)\n self.features_ = missing_features_info[1]\n\n return missing_features_info[0]\n\n def fit(self, X, y=None):\n \"\"\"Fit the transformer on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n self._fit(X, y)\n\n return self\n\n def transform(self, X):\n \"\"\"Generate missing values indicator for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n Xt : {ndarray or sparse matrix}, shape (n_samples, n_features) \\\n or (n_samples, n_features_with_missing)\n The missing indicator for input data. The data type of ``Xt``\n will be boolean.\n\n \"\"\"\n check_is_fitted(self)\n\n # Need not validate X again as it would have already been validated\n # in the Imputer calling MissingIndicator\n if not self._precomputed:\n X = self._validate_input(X, in_fit=False)\n else:\n if not (hasattr(X, \"dtype\") and X.dtype.kind == \"b\"):\n raise ValueError(\"precomputed is True but the input data is not a mask\")\n\n imputer_mask, features = self._get_missing_features_info(X)\n\n if self.features == \"missing-only\":\n features_diff_fit_trans = np.setdiff1d(features, self.features_)\n if self.error_on_new and features_diff_fit_trans.size > 0:\n raise ValueError(\n \"The features {} have missing values \"\n \"in transform but have no missing values \"\n \"in fit.\".format(features_diff_fit_trans)\n )\n\n if self.features_.size < self._n_features:\n imputer_mask = imputer_mask[:, self.features_]\n\n return imputer_mask\n\n def fit_transform(self, X, y=None):\n \"\"\"Generate missing values indicator for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n Xt : {ndarray or sparse matrix}, shape (n_samples, n_features) \\\n or (n_samples, n_features_with_missing)\n The missing indicator for input data. 
The data type of ``Xt``\n will be boolean.\n\n \"\"\"\n imputer_mask = self._fit(X, y)\n\n if self.features_.size < self._n_features:\n imputer_mask = imputer_mask[:, self.features_]\n\n return imputer_mask\n\n def _more_tags(self):\n return {\n \"allow_nan\": True,\n \"X_types\": [\"2darray\", \"string\"],\n \"preserves_dtype\": [],\n }\n", "from math import ceil\n\nimport pytest\nfrom scipy.stats import norm, randint\nimport numpy as np\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.experimental import enable_halving_search_cv # noqa\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import LeaveOneGroupOut\nfrom sklearn.model_selection import LeavePGroupsOut\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.model_selection import GroupShuffleSplit\nfrom sklearn.model_selection import HalvingGridSearchCV\nfrom sklearn.model_selection import HalvingRandomSearchCV\nfrom sklearn.model_selection import KFold, ShuffleSplit\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection._search_successive_halving import (\n _SubsampleMetaSplitter,\n _top_k,\n)\n\n\nclass FastClassifier(DummyClassifier):\n \"\"\"Dummy classifier that accepts parameters a, b, ... z.\n\n These parameter don't affect the predictions and are useful for fast\n grid searching.\"\"\"\n\n def __init__(\n self, strategy=\"stratified\", random_state=None, constant=None, **kwargs\n ):\n super().__init__(\n strategy=strategy, random_state=random_state, constant=constant\n )\n\n def get_params(self, deep=False):\n params = super().get_params(deep=deep)\n for char in range(ord(\"a\"), ord(\"z\") + 1):\n params[chr(char)] = \"whatever\"\n return params\n\n\[email protected](\"Est\", (HalvingGridSearchCV, HalvingRandomSearchCV))\[email protected](\n \"aggressive_elimination,\"\n \"max_resources,\"\n \"expected_n_iterations,\"\n \"expected_n_required_iterations,\"\n \"expected_n_possible_iterations,\"\n \"expected_n_remaining_candidates,\"\n \"expected_n_candidates,\"\n \"expected_n_resources,\",\n [\n # notice how it loops at the beginning\n # also, the number of candidates evaluated at the last iteration is\n # <= factor\n (True, \"limited\", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]),\n # no aggressive elimination: we end up with less iterations, and\n # the number of candidates at the last iter is > factor, which isn't\n # ideal\n (False, \"limited\", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]),\n # # When the amount of resource isn't limited, aggressive_elimination\n # # has no effect. 
Here the default min_resources='exhaust' will take\n # # over.\n (True, \"unlimited\", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),\n (False, \"unlimited\", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),\n ],\n)\ndef test_aggressive_elimination(\n Est,\n aggressive_elimination,\n max_resources,\n expected_n_iterations,\n expected_n_required_iterations,\n expected_n_possible_iterations,\n expected_n_remaining_candidates,\n expected_n_candidates,\n expected_n_resources,\n):\n # Test the aggressive_elimination parameter.\n\n n_samples = 1000\n X, y = make_classification(n_samples=n_samples, random_state=0)\n param_grid = {\"a\": (\"l1\", \"l2\"), \"b\": list(range(30))}\n base_estimator = FastClassifier()\n\n if max_resources == \"limited\":\n max_resources = 180\n else:\n max_resources = n_samples\n\n sh = Est(\n base_estimator,\n param_grid,\n aggressive_elimination=aggressive_elimination,\n max_resources=max_resources,\n factor=3,\n )\n sh.set_params(verbose=True) # just for test coverage\n\n if Est is HalvingRandomSearchCV:\n # same number of candidates as with the grid\n sh.set_params(n_candidates=2 * 30, min_resources=\"exhaust\")\n\n sh.fit(X, y)\n\n assert sh.n_iterations_ == expected_n_iterations\n assert sh.n_required_iterations_ == expected_n_required_iterations\n assert sh.n_possible_iterations_ == expected_n_possible_iterations\n assert sh.n_resources_ == expected_n_resources\n assert sh.n_candidates_ == expected_n_candidates\n assert sh.n_remaining_candidates_ == expected_n_remaining_candidates\n assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_\n\n\[email protected](\"Est\", (HalvingGridSearchCV, HalvingRandomSearchCV))\[email protected](\n \"min_resources,\"\n \"max_resources,\"\n \"expected_n_iterations,\"\n \"expected_n_possible_iterations,\"\n \"expected_n_resources,\",\n [\n # with enough resources\n (\"smallest\", \"auto\", 2, 4, [20, 60]),\n # with enough resources but min_resources set manually\n (50, \"auto\", 2, 3, [50, 150]),\n # without enough resources, only one iteration can be done\n (\"smallest\", 30, 1, 1, [20]),\n # with exhaust: use as much resources as possible at the last iter\n (\"exhaust\", \"auto\", 2, 2, [333, 999]),\n (\"exhaust\", 1000, 2, 2, [333, 999]),\n (\"exhaust\", 999, 2, 2, [333, 999]),\n (\"exhaust\", 600, 2, 2, [200, 600]),\n (\"exhaust\", 599, 2, 2, [199, 597]),\n (\"exhaust\", 300, 2, 2, [100, 300]),\n (\"exhaust\", 60, 2, 2, [20, 60]),\n (\"exhaust\", 50, 1, 1, [20]),\n (\"exhaust\", 20, 1, 1, [20]),\n ],\n)\ndef test_min_max_resources(\n Est,\n min_resources,\n max_resources,\n expected_n_iterations,\n expected_n_possible_iterations,\n expected_n_resources,\n):\n # Test the min_resources and max_resources parameters, and how they affect\n # the number of resources used at each iteration\n n_samples = 1000\n X, y = make_classification(n_samples=n_samples, random_state=0)\n param_grid = {\"a\": [1, 2], \"b\": [1, 2, 3]}\n base_estimator = FastClassifier()\n\n sh = Est(\n base_estimator,\n param_grid,\n factor=3,\n min_resources=min_resources,\n max_resources=max_resources,\n )\n if Est is HalvingRandomSearchCV:\n sh.set_params(n_candidates=6) # same number as with the grid\n\n sh.fit(X, y)\n\n expected_n_required_iterations = 2 # given 6 combinations and factor = 3\n assert sh.n_iterations_ == expected_n_iterations\n assert sh.n_required_iterations_ == expected_n_required_iterations\n assert sh.n_possible_iterations_ == expected_n_possible_iterations\n assert sh.n_resources_ == expected_n_resources\n if 
min_resources == \"exhaust\":\n assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_)\n\n\[email protected](\"Est\", (HalvingRandomSearchCV, HalvingGridSearchCV))\[email protected](\n \"max_resources, n_iterations, n_possible_iterations\",\n [\n (\"auto\", 5, 9), # all resources are used\n (1024, 5, 9),\n (700, 5, 8),\n (512, 5, 8),\n (511, 5, 7),\n (32, 4, 4),\n (31, 3, 3),\n (16, 3, 3),\n (4, 1, 1), # max_resources == min_resources, only one iteration is\n # possible\n ],\n)\ndef test_n_iterations(Est, max_resources, n_iterations, n_possible_iterations):\n # test the number of actual iterations that were run depending on\n # max_resources\n\n n_samples = 1024\n X, y = make_classification(n_samples=n_samples, random_state=1)\n param_grid = {\"a\": [1, 2], \"b\": list(range(10))}\n base_estimator = FastClassifier()\n factor = 2\n\n sh = Est(\n base_estimator,\n param_grid,\n cv=2,\n factor=factor,\n max_resources=max_resources,\n min_resources=4,\n )\n if Est is HalvingRandomSearchCV:\n sh.set_params(n_candidates=20) # same as for HalvingGridSearchCV\n sh.fit(X, y)\n assert sh.n_required_iterations_ == 5\n assert sh.n_iterations_ == n_iterations\n assert sh.n_possible_iterations_ == n_possible_iterations\n\n\[email protected](\"Est\", (HalvingRandomSearchCV, HalvingGridSearchCV))\ndef test_resource_parameter(Est):\n # Test the resource parameter\n\n n_samples = 1000\n X, y = make_classification(n_samples=n_samples, random_state=0)\n param_grid = {\"a\": [1, 2], \"b\": list(range(10))}\n base_estimator = FastClassifier()\n sh = Est(base_estimator, param_grid, cv=2, resource=\"c\", max_resources=10, factor=3)\n sh.fit(X, y)\n assert set(sh.n_resources_) == set([1, 3, 9])\n for r_i, params, param_c in zip(\n sh.cv_results_[\"n_resources\"],\n sh.cv_results_[\"params\"],\n sh.cv_results_[\"param_c\"],\n ):\n assert r_i == params[\"c\"] == param_c\n\n with pytest.raises(\n ValueError, match=\"Cannot use resource=1234 which is not supported \"\n ):\n sh = HalvingGridSearchCV(\n base_estimator, param_grid, cv=2, resource=\"1234\", max_resources=10\n )\n sh.fit(X, y)\n\n with pytest.raises(\n ValueError,\n match=(\n \"Cannot use parameter c as the resource since it is part \"\n \"of the searched parameters.\"\n ),\n ):\n param_grid = {\"a\": [1, 2], \"b\": [1, 2], \"c\": [1, 3]}\n sh = HalvingGridSearchCV(\n base_estimator, param_grid, cv=2, resource=\"c\", max_resources=10\n )\n sh.fit(X, y)\n\n\[email protected](\n \"max_resources, n_candidates, expected_n_candidates\",\n [\n (512, \"exhaust\", 128), # generate exactly as much as needed\n (32, \"exhaust\", 8),\n (32, 8, 8),\n (32, 7, 7), # ask for less than what we could\n (32, 9, 9), # ask for more than 'reasonable'\n ],\n)\ndef test_random_search(max_resources, n_candidates, expected_n_candidates):\n # Test random search and make sure the number of generated candidates is\n # as expected\n\n n_samples = 1024\n X, y = make_classification(n_samples=n_samples, random_state=0)\n param_grid = {\"a\": norm, \"b\": norm}\n base_estimator = FastClassifier()\n sh = HalvingRandomSearchCV(\n base_estimator,\n param_grid,\n n_candidates=n_candidates,\n cv=2,\n max_resources=max_resources,\n factor=2,\n min_resources=4,\n )\n sh.fit(X, y)\n assert sh.n_candidates_[0] == expected_n_candidates\n if n_candidates == \"exhaust\":\n # Make sure 'exhaust' makes the last iteration use as much resources as\n # we can\n assert sh.n_resources_[-1] == max_resources\n\n\[email protected](\n \"param_distributions, expected_n_candidates\",\n [\n 
({\"a\": [1, 2]}, 2), # all lists, sample less than n_candidates\n ({\"a\": randint(1, 3)}, 10), # not all list, respect n_candidates\n ],\n)\ndef test_random_search_discrete_distributions(\n param_distributions, expected_n_candidates\n):\n # Make sure random search samples the appropriate number of candidates when\n # we ask for more than what's possible. How many parameters are sampled\n # depends whether the distributions are 'all lists' or not (see\n # ParameterSampler for details). This is somewhat redundant with the checks\n # in ParameterSampler but interaction bugs were discovered during\n # developement of SH\n\n n_samples = 1024\n X, y = make_classification(n_samples=n_samples, random_state=0)\n base_estimator = FastClassifier()\n sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10)\n sh.fit(X, y)\n assert sh.n_candidates_[0] == expected_n_candidates\n\n\[email protected](\"Est\", (HalvingGridSearchCV, HalvingRandomSearchCV))\[email protected](\n \"params, expected_error_message\",\n [\n ({\"scoring\": {\"accuracy\", \"accuracy\"}}, \"Multimetric scoring is not supported\"),\n (\n {\"resource\": \"not_a_parameter\"},\n \"Cannot use resource=not_a_parameter which is not supported\",\n ),\n (\n {\"resource\": \"a\", \"max_resources\": 100},\n \"Cannot use parameter a as the resource since it is part of\",\n ),\n ({\"max_resources\": \"not_auto\"}, \"max_resources must be either\"),\n ({\"max_resources\": 100.5}, \"max_resources must be either\"),\n ({\"max_resources\": -10}, \"max_resources must be either\"),\n ({\"min_resources\": \"bad str\"}, \"min_resources must be either\"),\n ({\"min_resources\": 0.5}, \"min_resources must be either\"),\n ({\"min_resources\": -10}, \"min_resources must be either\"),\n (\n {\"max_resources\": \"auto\", \"resource\": \"b\"},\n \"max_resources can only be 'auto' if resource='n_samples'\",\n ),\n (\n {\"min_resources\": 15, \"max_resources\": 14},\n \"min_resources_=15 is greater than max_resources_=14\",\n ),\n ({\"cv\": KFold(shuffle=True)}, \"must yield consistent folds\"),\n ({\"cv\": ShuffleSplit()}, \"must yield consistent folds\"),\n ({\"refit\": \"whatever\"}, \"refit is expected to be a boolean\"),\n ],\n)\ndef test_input_errors(Est, params, expected_error_message):\n base_estimator = FastClassifier()\n param_grid = {\"a\": [1]}\n X, y = make_classification(100)\n\n sh = Est(base_estimator, param_grid, **params)\n\n with pytest.raises(ValueError, match=expected_error_message):\n sh.fit(X, y)\n\n\[email protected](\n \"params, expected_error_message\",\n [\n (\n {\"n_candidates\": \"exhaust\", \"min_resources\": \"exhaust\"},\n \"cannot be both set to 'exhaust'\",\n ),\n ({\"n_candidates\": \"bad\"}, \"either 'exhaust' or a positive integer\"),\n ({\"n_candidates\": 0}, \"either 'exhaust' or a positive integer\"),\n ],\n)\ndef test_input_errors_randomized(params, expected_error_message):\n # tests specific to HalvingRandomSearchCV\n\n base_estimator = FastClassifier()\n param_grid = {\"a\": [1]}\n X, y = make_classification(100)\n\n sh = HalvingRandomSearchCV(base_estimator, param_grid, **params)\n\n with pytest.raises(ValueError, match=expected_error_message):\n sh.fit(X, y)\n\n\[email protected](\n \"fraction, subsample_test, expected_train_size, expected_test_size\",\n [\n (0.5, True, 40, 10),\n (0.5, False, 40, 20),\n (0.2, True, 16, 4),\n (0.2, False, 16, 20),\n ],\n)\ndef test_subsample_splitter_shapes(\n fraction, subsample_test, expected_train_size, expected_test_size\n):\n # Make sure splits returned 
by SubsampleMetaSplitter are of appropriate\n # size\n\n n_samples = 100\n X, y = make_classification(n_samples)\n cv = _SubsampleMetaSplitter(\n base_cv=KFold(5),\n fraction=fraction,\n subsample_test=subsample_test,\n random_state=None,\n )\n\n for train, test in cv.split(X, y):\n assert train.shape[0] == expected_train_size\n assert test.shape[0] == expected_test_size\n if subsample_test:\n assert train.shape[0] + test.shape[0] == int(n_samples * fraction)\n else:\n assert test.shape[0] == n_samples // cv.base_cv.get_n_splits()\n\n\[email protected](\"subsample_test\", (True, False))\ndef test_subsample_splitter_determinism(subsample_test):\n # Make sure _SubsampleMetaSplitter is consistent across calls to split():\n # - we're OK having training sets differ (they're always sampled with a\n # different fraction anyway)\n # - when we don't subsample the test set, we want it to be always the same.\n # This check is the most important. This is ensured by the determinism\n # of the base_cv.\n\n # Note: we could force both train and test splits to be always the same if\n # we drew an int seed in _SubsampleMetaSplitter.__init__\n\n n_samples = 100\n X, y = make_classification(n_samples)\n cv = _SubsampleMetaSplitter(\n base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None\n )\n\n folds_a = list(cv.split(X, y, groups=None))\n folds_b = list(cv.split(X, y, groups=None))\n\n for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b):\n assert not np.all(train_a == train_b)\n\n if subsample_test:\n assert not np.all(test_a == test_b)\n else:\n assert np.all(test_a == test_b)\n assert np.all(X[test_a] == X[test_b])\n\n\[email protected](\n \"k, itr, expected\",\n [\n (1, 0, [\"c\"]),\n (2, 0, [\"a\", \"c\"]),\n (4, 0, [\"d\", \"b\", \"a\", \"c\"]),\n (10, 0, [\"d\", \"b\", \"a\", \"c\"]),\n (1, 1, [\"e\"]),\n (2, 1, [\"f\", \"e\"]),\n (10, 1, [\"f\", \"e\"]),\n (1, 2, [\"i\"]),\n (10, 2, [\"g\", \"h\", \"i\"]),\n ],\n)\ndef test_top_k(k, itr, expected):\n\n results = { # this isn't a 'real world' result dict\n \"iter\": [0, 0, 0, 0, 1, 1, 2, 2, 2],\n \"mean_test_score\": [4, 3, 5, 1, 11, 10, 5, 6, 9],\n \"params\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\"],\n }\n got = _top_k(results, k=k, itr=itr)\n assert np.all(got == expected)\n\n\[email protected](\"Est\", (HalvingRandomSearchCV, HalvingGridSearchCV))\ndef test_cv_results(Est):\n # test that the cv_results_ matches correctly the logic of the\n # tournament: in particular that the candidates continued in each\n # successive iteration are those that were best in the previous iteration\n pd = pytest.importorskip(\"pandas\")\n\n rng = np.random.RandomState(0)\n\n n_samples = 1000\n X, y = make_classification(n_samples=n_samples, random_state=0)\n param_grid = {\"a\": (\"l1\", \"l2\"), \"b\": list(range(30))}\n base_estimator = FastClassifier()\n\n # generate random scores: we want to avoid ties, which would otherwise\n # mess with the ordering and make testing harder\n def scorer(est, X, y):\n return rng.rand()\n\n sh = Est(base_estimator, param_grid, factor=2, scoring=scorer)\n if Est is HalvingRandomSearchCV:\n # same number of candidates as with the grid\n sh.set_params(n_candidates=2 * 30, min_resources=\"exhaust\")\n\n sh.fit(X, y)\n\n # non-regression check for\n # https://github.com/scikit-learn/scikit-learn/issues/19203\n assert isinstance(sh.cv_results_[\"iter\"], np.ndarray)\n assert isinstance(sh.cv_results_[\"n_resources\"], np.ndarray)\n\n cv_results_df = 
pd.DataFrame(sh.cv_results_)\n\n # just make sure we don't have ties\n assert len(cv_results_df[\"mean_test_score\"].unique()) == len(cv_results_df)\n\n cv_results_df[\"params_str\"] = cv_results_df[\"params\"].apply(str)\n table = cv_results_df.pivot(\n index=\"params_str\", columns=\"iter\", values=\"mean_test_score\"\n )\n\n # table looks like something like this:\n # iter 0 1 2 3 4 5\n # params_str\n # {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN\n # {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN\n # {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN\n # {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN\n # {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN\n # ...\n\n # where a NaN indicates that the candidate wasn't evaluated at a given\n # iteration, because it wasn't part of the top-K at some previous\n # iteration. We here make sure that candidates that aren't in the top-k at\n # any given iteration are indeed not evaluated at the subsequent\n # iterations.\n nan_mask = pd.isna(table)\n n_iter = sh.n_iterations_\n for it in range(n_iter - 1):\n already_discarded_mask = nan_mask[it]\n\n # make sure that if a candidate is already discarded, we don't evaluate\n # it later\n assert (\n already_discarded_mask & nan_mask[it + 1] == already_discarded_mask\n ).all()\n\n # make sure that the number of discarded candidate is correct\n discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1]\n kept_mask = ~already_discarded_mask & ~discarded_now_mask\n assert kept_mask.sum() == sh.n_candidates_[it + 1]\n\n # make sure that all discarded candidates have a lower score than the\n # kept candidates\n discarded_max_score = table[it].where(discarded_now_mask).max()\n kept_min_score = table[it].where(kept_mask).min()\n assert discarded_max_score < kept_min_score\n\n # We now make sure that the best candidate is chosen only from the last\n # iteration.\n # We also make sure this is true even if there were higher scores in\n # earlier rounds (this isn't generally the case, but worth ensuring it's\n # possible).\n\n last_iter = cv_results_df[\"iter\"].max()\n idx_best_last_iter = cv_results_df[cv_results_df[\"iter\"] == last_iter][\n \"mean_test_score\"\n ].idxmax()\n idx_best_all_iters = cv_results_df[\"mean_test_score\"].idxmax()\n\n assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter][\"params\"]\n assert (\n cv_results_df.iloc[idx_best_last_iter][\"mean_test_score\"]\n < cv_results_df.iloc[idx_best_all_iters][\"mean_test_score\"]\n )\n assert (\n cv_results_df.iloc[idx_best_last_iter][\"params\"]\n != cv_results_df.iloc[idx_best_all_iters][\"params\"]\n )\n\n\[email protected](\"Est\", (HalvingGridSearchCV, HalvingRandomSearchCV))\ndef test_base_estimator_inputs(Est):\n # make sure that the base estimators are passed the correct parameters and\n # number of samples at each iteration.\n pd = pytest.importorskip(\"pandas\")\n\n passed_n_samples_fit = []\n passed_n_samples_predict = []\n passed_params = []\n\n class FastClassifierBookKeeping(FastClassifier):\n def fit(self, X, y):\n passed_n_samples_fit.append(X.shape[0])\n return super().fit(X, y)\n\n def predict(self, X):\n passed_n_samples_predict.append(X.shape[0])\n return super().predict(X)\n\n def set_params(self, **params):\n passed_params.append(params)\n return super().set_params(**params)\n\n n_samples = 1024\n n_splits = 2\n X, y = make_classification(n_samples=n_samples, random_state=0)\n param_grid = {\"a\": (\"l1\", \"l2\"), \"b\": list(range(30))}\n base_estimator = FastClassifierBookKeeping()\n\n sh = Est(\n 
base_estimator,\n param_grid,\n factor=2,\n cv=n_splits,\n return_train_score=False,\n refit=False,\n )\n if Est is HalvingRandomSearchCV:\n # same number of candidates as with the grid\n sh.set_params(n_candidates=2 * 30, min_resources=\"exhaust\")\n\n sh.fit(X, y)\n\n assert len(passed_n_samples_fit) == len(passed_n_samples_predict)\n passed_n_samples = [\n x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict)\n ]\n\n # Lists are of length n_splits * n_iter * n_candidates_at_i.\n # Each chunk of size n_splits corresponds to the n_splits folds for the\n # same candidate at the same iteration, so they contain equal values. We\n # subsample such that the lists are of length n_iter * n_candidates_at_it\n passed_n_samples = passed_n_samples[::n_splits]\n passed_params = passed_params[::n_splits]\n\n cv_results_df = pd.DataFrame(sh.cv_results_)\n\n assert len(passed_params) == len(passed_n_samples) == len(cv_results_df)\n\n uniques, counts = np.unique(passed_n_samples, return_counts=True)\n assert (sh.n_resources_ == uniques).all()\n assert (sh.n_candidates_ == counts).all()\n\n assert (cv_results_df[\"params\"] == passed_params).all()\n assert (cv_results_df[\"n_resources\"] == passed_n_samples).all()\n\n\[email protected](\"Est\", (HalvingGridSearchCV, HalvingRandomSearchCV))\ndef test_groups_support(Est):\n # Check if ValueError (when groups is None) propagates to\n # HalvingGridSearchCV and HalvingRandomSearchCV\n # And also check if groups is correctly passed to the cv object\n rng = np.random.RandomState(0)\n\n X, y = make_classification(n_samples=50, n_classes=2, random_state=0)\n groups = rng.randint(0, 3, 50)\n\n clf = LinearSVC(random_state=0)\n grid = {\"C\": [1]}\n\n group_cvs = [\n LeaveOneGroupOut(),\n LeavePGroupsOut(2),\n GroupKFold(n_splits=3),\n GroupShuffleSplit(random_state=0),\n ]\n error_msg = \"The 'groups' parameter should not be None.\"\n for cv in group_cvs:\n gs = Est(clf, grid, cv=cv)\n with pytest.raises(ValueError, match=error_msg):\n gs.fit(X, y)\n gs.fit(X, y, groups=groups)\n\n non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)]\n for cv in non_group_cvs:\n gs = Est(clf, grid, cv=cv)\n # Should not raise an error\n gs.fit(X, y)\n\n\[email protected](\"SearchCV\", [HalvingRandomSearchCV, HalvingGridSearchCV])\ndef test_min_resources_null(SearchCV):\n \"\"\"Check that we raise an error if the minimum resources is set to 0.\"\"\"\n base_estimator = FastClassifier()\n param_grid = {\"a\": [1]}\n X = np.empty(0).reshape(0, 3)\n\n search = SearchCV(base_estimator, param_grid, min_resources=\"smallest\")\n\n err_msg = \"min_resources_=0: you might have passed an empty dataset X.\"\n with pytest.raises(ValueError, match=err_msg):\n search.fit(X, [])\n\n\[email protected](\"SearchCV\", [HalvingGridSearchCV, HalvingRandomSearchCV])\ndef test_select_best_index(SearchCV):\n \"\"\"Check the selection strategy of the halving search.\"\"\"\n results = { # this isn't a 'real world' result dict\n \"iter\": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]),\n \"mean_test_score\": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]),\n \"params\": np.array([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\"]),\n }\n\n # we expect the index of 'i'\n best_index = SearchCV._select_best_index(None, None, results)\n assert best_index == 8\n", "\"\"\"\n==================================================\nStatistical comparison of models using grid search\n==================================================\n\nThis example illustrates how to statistically 
compare the performance of models\ntrained and evaluated using :class:`~sklearn.model_selection.GridSearchCV`.\n\"\"\"\n\n# %%\n# We will start by simulating moon shaped data (where the ideal separation\n# between classes is non-linear), adding to it a moderate degree of noise.\n# Datapoints will belong to one of two possible classes to be predicted by two\n# features. We will simulate 50 samples for each class:\n\nprint(__doc__)\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.datasets import make_moons\n\nX, y = make_moons(noise=0.352, random_state=1, n_samples=100)\n\nsns.scatterplot(\n x=X[:, 0], y=X[:, 1], hue=y,\n marker='o', s=25, edgecolor='k', legend=False\n).set_title(\"Data\")\nplt.show()\n\n# %%\n# We will compare the performance of :class:`~sklearn.svm.SVC` estimators that\n# vary on their `kernel` parameter, to decide which choice of this\n# hyper-parameter predicts our simulated data best.\n# We will evaluate the performance of the models using\n# :class:`~sklearn.model_selection.RepeatedStratifiedKFold`, repeating 10 times\n# a 10-fold stratified cross validation using a different randomization of the\n# data in each repetition. The performance will be evaluated using\n# :class:`~sklearn.metrics.roc_auc_score`.\n\nfrom sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold\nfrom sklearn.svm import SVC\n\nparam_grid = [\n {'kernel': ['linear']},\n {'kernel': ['poly'], 'degree': [2, 3]},\n {'kernel': ['rbf']}\n]\n\nsvc = SVC(random_state=0)\n\ncv = RepeatedStratifiedKFold(\n n_splits=10, n_repeats=10, random_state=0\n)\n\nsearch = GridSearchCV(\n estimator=svc, param_grid=param_grid,\n scoring='roc_auc', cv=cv\n)\nsearch.fit(X, y)\n\n# %%\n# We can now inspect the results of our search, sorted by their\n# `mean_test_score`:\n\nimport pandas as pd\n\nresults_df = pd.DataFrame(search.cv_results_)\nresults_df = results_df.sort_values(by=['rank_test_score'])\nresults_df = (\n results_df\n .set_index(results_df[\"params\"].apply(\n lambda x: \"_\".join(str(val) for val in x.values()))\n )\n .rename_axis('kernel')\n)\nresults_df[\n ['params', 'rank_test_score', 'mean_test_score', 'std_test_score']\n]\n\n# %%\n# We can see that the estimator using the `'rbf'` kernel performed best,\n# closely followed by `'linear'`. Both estimators with a `'poly'` kernel\n# performed worse, with the one using a two-degree polynomial achieving a much\n# lower perfomance than all other models.\n#\n# Usually, the analysis just ends here, but half the story is missing. The\n# output of :class:`~sklearn.model_selection.GridSearchCV` does not provide\n# information on the certainty of the differences between the models.\n# We don't know if these are **statistically** significant.\n# To evaluate this, we need to conduct a statistical test.\n# Specifically, to contrast the performance of two models we should\n# statistically compare their AUC scores. 
There are 100 samples (AUC\n# scores) for each model as we repreated 10 times a 10-fold cross-validation.\n#\n# However, the scores of the models are not independent: all models are\n# evaluated on the **same** 100 partitions, increasing the correlation\n# between the performance of the models.\n# Since some partitions of the data can make the distinction of the classes\n# particularly easy or hard to find for all models, the models scores will\n# co-vary.\n#\n# Let's inspect this partition effect by plotting the performance of all models\n# in each fold, and calculating the correlation between models across folds:\n\n# create df of model scores ordered by perfomance\nmodel_scores = results_df.filter(regex=r'split\\d*_test_score')\n\n# plot 30 examples of dependency between cv fold and AUC scores\nfig, ax = plt.subplots()\nsns.lineplot(\n data=model_scores.transpose().iloc[:30],\n dashes=False, palette='Set1', marker='o', alpha=.5, ax=ax\n)\nax.set_xlabel(\"CV test fold\", size=12, labelpad=10)\nax.set_ylabel(\"Model AUC\", size=12)\nax.tick_params(bottom=True, labelbottom=False)\nplt.show()\n\n# print correlation of AUC scores across folds\nprint(f\"Correlation of models:\\n {model_scores.transpose().corr()}\")\n\n# %%\n# We can observe that the performance of the models highly depends on the fold.\n#\n# As a consequence, if we assume independence between samples we will be\n# underestimating the variance computed in our statistical tests, increasing\n# the number of false positive errors (i.e. detecting a significant difference\n# between models when such does not exist) [1]_.\n#\n# Several variance-corrected statistical tests have been developed for these\n# cases. In this example we will show how to implement one of them (the so\n# called Nadeau and Bengio's corrected t-test) under two different statistical\n# frameworks: frequentist and Bayesian.\n\n# %%\n# Comparing two models: frequentist approach\n# ------------------------------------------\n#\n# We can start by asking: \"Is the first model significantly better than the\n# second model (when ranked by `mean_test_score`)?\"\n#\n# To answer this question using a frequentist approach we could\n# run a paired t-test and compute the p-value. This is also known as\n# Diebold-Mariano test in the forecast literature [5]_.\n# Many variants of such a t-test have been developed to account for the\n# 'non-independence of samples problem'\n# described in the previous section. We will use the one proven to obtain the\n# highest replicability scores (which rate how similar the performance of a\n# model is when evaluating it on different random partitions of the same\n# dataset) while mantaining a low rate of false postitives and false negatives:\n# the Nadeau and Bengio's corrected t-test [2]_ that uses a 10 times repeated\n# 10-fold cross validation [3]_.\n#\n# This corrected paired t-test is computed as:\n#\n# .. 
math::\n# t=\\frac{\\frac{1}{k \\cdot r}\\sum_{i=1}^{k}\\sum_{j=1}^{r}x_{ij}}\n# {\\sqrt{(\\frac{1}{k \\cdot r}+\\frac{n_{test}}{n_{train}})\\hat{\\sigma}^2}}\n#\n# where :math:`k` is the number of folds,\n# :math:`r` the number of repetitions in the cross-validation,\n# :math:`x` is the difference in performance of the models,\n# :math:`n_{test}` is the number of samples used for testing,\n# :math:`n_{train}` is the number of samples used for training,\n# and :math:`\\hat{\\sigma}^2` represents the variance of the observed\n# differences.\n#\n# Let's implement a corrected right-tailed paired t-test to evaluate if the\n# performance of the first model is significantly better than that of the\n# second model. Our null hypothesis is that the second model performs at least\n# as good as the first model.\n\nimport numpy as np\nfrom scipy.stats import t\n\n\ndef corrected_std(differences, n_train, n_test):\n \"\"\"Corrects standard deviation using Nadeau and Bengio's approach.\n\n Parameters\n ----------\n differences : ndarray of shape (n_samples, 1)\n Vector containing the differences in the score metrics of two models.\n n_train : int\n Number of samples in the training set.\n n_test : int\n Number of samples in the testing set.\n\n Returns\n -------\n corrected_std : int\n Variance-corrected standard deviation of the set of differences.\n \"\"\"\n kr = len(differences)\n corrected_var = (\n np.var(differences, ddof=1) * (1 / kr + n_test / n_train)\n )\n corrected_std = np.sqrt(corrected_var)\n return corrected_std\n\n\ndef compute_corrected_ttest(differences, df, n_train, n_test):\n \"\"\"Computes right-tailed paired t-test with corrected variance.\n\n Parameters\n ----------\n differences : array-like of shape (n_samples, 1)\n Vector containing the differences in the score metrics of two models.\n df : int\n Degrees of freedom.\n n_train : int\n Number of samples in the training set.\n n_test : int\n Number of samples in the testing set.\n\n Returns\n -------\n t_stat : float\n Variance-corrected t-statistic.\n p_val : float\n Variance-corrected p-value.\n \"\"\"\n mean = np.mean(differences)\n std = corrected_std(differences, n_train, n_test)\n t_stat = mean / std\n p_val = t.sf(np.abs(t_stat), df) # right-tailed t-test\n return t_stat, p_val\n\n\n# %%\nmodel_1_scores = model_scores.iloc[0].values # scores of the best model\nmodel_2_scores = model_scores.iloc[1].values # scores of the second-best model\n\ndifferences = model_1_scores - model_2_scores\n\nn = differences.shape[0] # number of test sets\ndf = n - 1\nn_train = len(list(cv.split(X, y))[0][0])\nn_test = len(list(cv.split(X, y))[0][1])\n\nt_stat, p_val = compute_corrected_ttest(differences, df, n_train, n_test)\nprint(f\"Corrected t-value: {t_stat:.3f}\\n\"\n f\"Corrected p-value: {p_val:.3f}\")\n\n# %%\n# We can compare the corrected t- and p-values with the uncorrected ones:\n\nt_stat_uncorrected = (\n np.mean(differences) / np.sqrt(np.var(differences, ddof=1) / n)\n)\np_val_uncorrected = t.sf(np.abs(t_stat_uncorrected), df)\n\nprint(f\"Uncorrected t-value: {t_stat_uncorrected:.3f}\\n\"\n f\"Uncorrected p-value: {p_val_uncorrected:.3f}\")\n\n# %%\n# Using the conventional significance alpha level at `p=0.05`, we observe that\n# the uncorrected t-test concludes that the first model is significantly better\n# than the second.\n#\n# With the corrected approach, in contrast, we fail to detect this difference.\n#\n# In the latter case, however, the frequentist approach does not let us\n# conclude that the first and second model have 
an equivalent performance. If\n# we wanted to make this assertion we need to use a Bayesian approach.\n\n# %%\n# Comparing two models: Bayesian approach\n# ---------------------------------------\n# We can use Bayesian estimation to calculate the probability that the first\n# model is better than the second. Bayesian estimation will output a\n# distribution followed by the mean :math:`\\mu` of the differences in the\n# performance of two models.\n#\n# To obtain the posterior distribution we need to define a prior that models\n# our beliefs of how the mean is distributed before looking at the data,\n# and multiply it by a likelihood function that computes how likely our\n# observed differences are, given the values that the mean of differences\n# could take.\n#\n# Bayesian estimation can be carried out in many forms to answer our question,\n# but in this example we will implement the approach suggested by Benavoli and\n# collegues [4]_.\n#\n# One way of defining our posterior using a closed-form expression is to select\n# a prior conjugate to the likelihood function. Benavoli and collegues [4]_\n# show that when comparing the performance of two classifiers we can model the\n# prior as a Normal-Gamma distribution (with both mean and variance unknown)\n# conjugate to a normal likelihood, to thus express the posterior as a normal\n# distribution.\n# Marginalizing out the variance from this normal posterior, we can define the\n# posterior of the mean parameter as a Student's t-distribution. Specifically:\n#\n# .. math::\n# St(\\mu;n-1,\\overline{x},(\\frac{1}{n}+\\frac{n_{test}}{n_{train}})\n# \\hat{\\sigma}^2)\n#\n# where :math:`n` is the total number of samples,\n# :math:`\\overline{x}` represents the mean difference in the scores,\n# :math:`n_{test}` is the number of samples used for testing,\n# :math:`n_{train}` is the number of samples used for training,\n# and :math:`\\hat{\\sigma}^2` represents the variance of the observed\n# differences.\n#\n# Notice that we are using Nadeau and Bengio's corrected variance in our\n# Bayesian approach as well.\n#\n# Let's compute and plot the posterior:\n\n# intitialize random variable\nt_post = t(\n df, loc=np.mean(differences),\n scale=corrected_std(differences, n_train, n_test)\n)\n\n# %%\n# Let's plot the posterior distribution:\n\nx = np.linspace(t_post.ppf(0.001), t_post.ppf(0.999), 100)\n\nplt.plot(x, t_post.pdf(x))\nplt.xticks(np.arange(-0.04, 0.06, 0.01))\nplt.fill_between(x, t_post.pdf(x), 0, facecolor='blue', alpha=.2)\nplt.ylabel(\"Probability density\")\nplt.xlabel(r\"Mean difference ($\\mu$)\")\nplt.title(\"Posterior distribution\")\nplt.show()\n\n# %%\n# We can calculate the probability that the first model is better than the\n# second by computing the area under the curve of the posterior distirbution\n# from zero to infinity. 
And also the reverse: we can calculate the probability\n# that the second model is better than the first by computing the area under\n# the curve from minus infinity to zero.\n\nbetter_prob = 1 - t_post.cdf(0)\n\nprint(f\"Probability of {model_scores.index[0]} being more accurate than \"\n f\"{model_scores.index[1]}: {better_prob:.3f}\")\nprint(f\"Probability of {model_scores.index[1]} being more accurate than \"\n f\"{model_scores.index[0]}: {1 - better_prob:.3f}\")\n\n# %%\n# In contrast with the frequentist approach, we can compute the probability\n# that one model is better than the other.\n#\n# Note that we obtained similar results as those in the frequentist approach.\n# Given our choice of priors, we are essentially performing the same\n# computations, but we are allowed to make different assertions.\n\n# %%\n# Region of Practical Equivalence\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# Sometimes we are interested in determining the probabilities that our models\n# have an equivalent performance, where \"equivalent\" is defined in a practical\n# way. A naive approach [4]_ would be to define estimators as practically\n# equivalent when they differ by less than 1% in their accuracy. But we could\n# also define this practical equivalence taking into account the problem we are\n# trying to solve. For example, a difference of 5% in accuracy would mean an\n# increase of $1000 in sales, and we consider any quantity above that as\n# relevant for our business.\n#\n# In this example we are going to define the\n# Region of Practical Equivalence (ROPE) to be :math:`[-0.01, 0.01]`. That is,\n# we will consider two models as practically equivalent if they differ by less\n# than 1% in their performance.\n#\n# To compute the probabilities of the classifiers being practically equivalent,\n# we calculate the area under the curve of the posterior over the ROPE\n# interval:\n\nrope_interval = [-0.01, 0.01]\nrope_prob = t_post.cdf(rope_interval[1]) - t_post.cdf(rope_interval[0])\n\nprint(f\"Probability of {model_scores.index[0]} and {model_scores.index[1]} \"\n f\"being practically equivalent: {rope_prob:.3f}\")\n\n# %%\n# We can plot how the posterior is distributed over the ROPE interval:\n\nx_rope = np.linspace(rope_interval[0], rope_interval[1], 100)\n\nplt.plot(x, t_post.pdf(x))\nplt.xticks(np.arange(-0.04, 0.06, 0.01))\nplt.vlines([-0.01, 0.01], ymin=0, ymax=(np.max(t_post.pdf(x)) + 1))\nplt.fill_between(x_rope, t_post.pdf(x_rope), 0, facecolor='blue', alpha=.2)\nplt.ylabel(\"Probability density\")\nplt.xlabel(r\"Mean difference ($\\mu$)\")\nplt.title(\"Posterior distribution under the ROPE\")\nplt.show()\n\n# %%\n# As suggested in [4]_, we can further interpret these probabilities using the\n# same criteria as the frequentist approach: is the probability of falling\n# inside the ROPE bigger than 95% (alpha value of 5%)? In that case we can\n# conclude that both models are practically equivalent.\n\n# %%\n# The Bayesian estimation approach also allows us to compute how uncertain we\n# are about our estimation of the difference. This can be calculated using\n# credible intervals. 
For a given probability, they show the range of values\n# that the estimated quantity, in our case the mean difference in\n# performance, can take.\n# For example, a 50% credible interval [x, y] tells us that there is a 50%\n# probability that the true (mean) difference of performance between models is\n# between x and y.\n#\n# Let's determine the credible intervals of our data using 50%, 75% and 95%:\n\ncred_intervals = []\nintervals = [0.5, 0.75, 0.95]\n\nfor interval in intervals:\n cred_interval = list(t_post.interval(interval))\n cred_intervals.append([interval, cred_interval[0], cred_interval[1]])\n\ncred_int_df = pd.DataFrame(\n cred_intervals,\n columns=['interval', 'lower value', 'upper value']\n).set_index('interval')\ncred_int_df\n\n# %%\n# As shown in the table, there is a 50% probability that the true mean\n# difference between models will be between 0.000977 and 0.019023, 70%\n# probability that it will be between -0.005422 and 0.025422, and 95%\n# probability that it will be between -0.016445\tand 0.036445.\n\n# %%\n# Pairwise comparison of all models: frequentist approach\n# -------------------------------------------------------\n#\n# We could also be interested in comparing the performance of all our models\n# evaluated with :class:`~sklearn.model_selection.GridSearchCV`. In this case\n# we would be running our statistical test multiple times, which leads us to\n# the `multiple comparisons problem\n# <https://en.wikipedia.org/wiki/Multiple_comparisons_problem>`_.\n#\n# There are many possible ways to tackle this problem, but a standard approach\n# is to apply a `Bonferroni correction\n# <https://en.wikipedia.org/wiki/Bonferroni_correction>`_. Bonferroni can be\n# computed by multiplying the p-value by the number of comparisons we are\n# testing.\n#\n# Let's compare the performance of the models using the corrected t-test:\n\nfrom itertools import combinations\nfrom math import factorial\n\nn_comparisons = (\n factorial(len(model_scores))\n / (factorial(2) * factorial(len(model_scores) - 2))\n)\npairwise_t_test = []\n\nfor model_i, model_k in combinations(range(len(model_scores)), 2):\n model_i_scores = model_scores.iloc[model_i].values\n model_k_scores = model_scores.iloc[model_k].values\n differences = model_i_scores - model_k_scores\n t_stat, p_val = compute_corrected_ttest(\n differences, df, n_train, n_test\n )\n p_val *= n_comparisons # implement Bonferroni correction\n # Bonferroni can output p-values higher than 1\n p_val = 1 if p_val > 1 else p_val\n pairwise_t_test.append(\n [model_scores.index[model_i], model_scores.index[model_k],\n t_stat, p_val]\n )\n\npairwise_comp_df = pd.DataFrame(\n pairwise_t_test,\n columns=['model_1', 'model_2', 't_stat', 'p_val']\n).round(3)\npairwise_comp_df\n\n# %%\n# We observe that after correcting for multiple comparisons, the only model\n# that significantly differs from the others is `'2_poly'`.\n# `'rbf'`, the model ranked first by\n# :class:`~sklearn.model_selection.GridSearchCV`, does not significantly\n# differ from `'linear'` or `'3_poly'`.\n\n# %%\n# Pairwise comparison of all models: Bayesian approach\n# ----------------------------------------------------\n#\n# When using Bayesian estimation to compare multiple models, we don't need to\n# correct for multiple comparisons (for reasons why see [4]_).\n#\n# We can carry out our pairwise comparisons the same way as in the first\n# section:\n\npairwise_bayesian = []\n\nfor model_i, model_k in combinations(range(len(model_scores)), 2):\n model_i_scores = 
model_scores.iloc[model_i].values\n model_k_scores = model_scores.iloc[model_k].values\n differences = model_i_scores - model_k_scores\n t_post = t(\n df, loc=np.mean(differences),\n scale=corrected_std(differences, n_train, n_test)\n )\n worse_prob = t_post.cdf(rope_interval[0])\n better_prob = 1 - t_post.cdf(rope_interval[1])\n rope_prob = t_post.cdf(rope_interval[1]) - t_post.cdf(rope_interval[0])\n\n pairwise_bayesian.append([worse_prob, better_prob, rope_prob])\n\npairwise_bayesian_df = (pd.DataFrame(\n pairwise_bayesian,\n columns=['worse_prob', 'better_prob', 'rope_prob']\n).round(3))\n\npairwise_comp_df = pairwise_comp_df.join(pairwise_bayesian_df)\npairwise_comp_df\n\n# %%\n# Using the Bayesian approach we can compute the probability that a model\n# performs better, worse or practically equivalent to another.\n#\n# Results show that the model ranked first by\n# :class:`~sklearn.model_selection.GridSearchCV` `'rbf'`, has approximately a\n# 6.8% chance of being worse than `'linear'`, and a 1.8% chance of being worse\n# than `'3_poly'`.\n# `'rbf'` and `'linear'` have a 43% probability of being practically\n# equivalent, while `'rbf'` and `'3_poly'` have a 10% chance of being so.\n#\n# Similarly to the conclusions obtained using the frequentist approach, all\n# models have a 100% probability of being better than `'2_poly'`, and none have\n# a practically equivalent performance with the latter.\n\n# %%\n# Take-home messages\n# ------------------\n# - Small differences in performance measures might easily turn out to be\n# merely by chance, but not because one model predicts systematically better\n# than the other. As shown in this example, statistics can tell you how\n# likely that is.\n# - When statistically comparing the performance of two models evaluated in\n# GridSearchCV, it is necessary to correct the calculated variance which\n# could be underestimated since the scores of the models are not independent\n# from each other.\n# - A frequentist approach that uses a (variance-corrected) paired t-test can\n# tell us if the performance of one model is better than another with a\n# degree of certainty above chance.\n# - A Bayesian approach can provide the probabilities of one model being\n# better, worse or practically equivalent than another. It can also tell us\n# how confident we are of knowing that the true differences of our models\n# fall under a certain range of values.\n# - If multiple models are statistically compared, a multiple comparisons\n# correction is needed when using the frequentist approach.\n\n# %%\n# .. topic:: References\n#\n# .. [1] Dietterich, T. G. (1998). `Approximate statistical tests for\n# comparing supervised classification learning algorithms\n# <http://web.cs.iastate.edu/~jtian/cs573/Papers/Dietterich-98.pdf>`_.\n# Neural computation, 10(7).\n# .. [2] Nadeau, C., & Bengio, Y. (2000). `Inference for the generalization\n# error\n# <https://papers.nips.cc/paper/1661-inference-for-the-generalization-error.pdf>`_.\n# In Advances in neural information processing systems.\n# .. [3] Bouckaert, R. R., & Frank, E. (2004). `Evaluating the replicability\n# of significance tests for comparing learning algorithms\n# <https://www.cms.waikato.ac.nz/~ml/publications/2004/bouckaert-frank.pdf>`_.\n# In Pacific-Asia Conference on Knowledge Discovery and Data Mining.\n# .. [4] Benavoli, A., Corani, G., Demšar, J., & Zaffalon, M. (2017). 
`Time\n# for a change: a tutorial for comparing multiple classifiers through\n# Bayesian analysis\n# <http://www.jmlr.org/papers/volume18/16-305/16-305.pdf>`_.\n# The Journal of Machine Learning Research, 18(1). See the Python\n# library that accompanies this paper `here\n# <https://github.com/janezd/baycomp>`_.\n# .. [5] Diebold, F.X. & Mariano R.S. (1995). `Comparing predictive accuracy\n# <http://www.est.uc3m.es/esp/nueva_docencia/comp_col_get/lade/tecnicas_prediccion/Practicas0708/Comparing%20Predictive%20Accuracy%20(Dielbold).pdf>`_\n# Journal of Business & economic statistics, 20(1), 134-144.\n", "\"\"\"Stochastic optimization methods for MLP\n\"\"\"\n\n# Authors: Jiyuan Qian <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\n\n\nclass BaseOptimizer:\n \"\"\"Base (Stochastic) gradient descent optimizer\n\n Parameters\n ----------\n params : list, length = len(coefs_) + len(intercepts_)\n The concatenated list containing coefs_ and intercepts_ in MLP model.\n Used for initializing velocities and updating params\n\n learning_rate_init : float, default=0.1\n The initial learning rate used. It controls the step-size in updating\n the weights\n\n Attributes\n ----------\n learning_rate : float\n the current learning rate\n \"\"\"\n\n def __init__(self, params, learning_rate_init=0.1):\n self.params = [param for param in params]\n self.learning_rate_init = learning_rate_init\n self.learning_rate = float(learning_rate_init)\n\n def update_params(self, grads):\n \"\"\"Update parameters with given gradients\n\n Parameters\n ----------\n grads : list, length = len(params)\n Containing gradients with respect to coefs_ and intercepts_ in MLP\n model. So length should be aligned with params\n \"\"\"\n updates = self._get_updates(grads)\n for param, update in zip(self.params, updates):\n param += update\n\n def iteration_ends(self, time_step):\n \"\"\"Perform update to learning rate and potentially other states at the\n end of an iteration\n \"\"\"\n pass\n\n def trigger_stopping(self, msg, verbose):\n \"\"\"Decides whether it is time to stop training\n\n Parameters\n ----------\n msg : str\n Message passed in for verbose output\n\n verbose : bool\n Print message to stdin if True\n\n Returns\n -------\n is_stopping : bool\n True if training needs to stop\n \"\"\"\n if verbose:\n print(msg + \" Stopping.\")\n return True\n\n\nclass SGDOptimizer(BaseOptimizer):\n \"\"\"Stochastic gradient descent optimizer with momentum\n\n Parameters\n ----------\n params : list, length = len(coefs_) + len(intercepts_)\n The concatenated list containing coefs_ and intercepts_ in MLP model.\n Used for initializing velocities and updating params\n\n learning_rate_init : float, default=0.1\n The initial learning rate used. 
It controls the step-size in updating\n the weights\n\n lr_schedule : {'constant', 'adaptive', 'invscaling'}, default='constant'\n Learning rate schedule for weight updates.\n\n -'constant', is a constant learning rate given by\n 'learning_rate_init'.\n\n -'invscaling' gradually decreases the learning rate 'learning_rate_' at\n each time step 't' using an inverse scaling exponent of 'power_t'.\n learning_rate_ = learning_rate_init / pow(t, power_t)\n\n -'adaptive', keeps the learning rate constant to\n 'learning_rate_init' as long as the training keeps decreasing.\n Each time 2 consecutive epochs fail to decrease the training loss by\n tol, or fail to increase validation score by tol if 'early_stopping'\n is on, the current learning rate is divided by 5.\n\n momentum : float, default=0.9\n Value of momentum used, must be larger than or equal to 0\n\n nesterov : bool, default=True\n Whether to use nesterov's momentum or not. Use nesterov's if True\n\n power_t : float, default=0.5\n Power of time step 't' in inverse scaling. See `lr_schedule` for\n more details.\n\n Attributes\n ----------\n learning_rate : float\n the current learning rate\n\n velocities : list, length = len(params)\n velocities that are used to update params\n \"\"\"\n\n def __init__(\n self,\n params,\n learning_rate_init=0.1,\n lr_schedule=\"constant\",\n momentum=0.9,\n nesterov=True,\n power_t=0.5,\n ):\n super().__init__(params, learning_rate_init)\n\n self.lr_schedule = lr_schedule\n self.momentum = momentum\n self.nesterov = nesterov\n self.power_t = power_t\n self.velocities = [np.zeros_like(param) for param in params]\n\n def iteration_ends(self, time_step):\n \"\"\"Perform updates to learning rate and potential other states at the\n end of an iteration\n\n Parameters\n ----------\n time_step : int\n number of training samples trained on so far, used to update\n learning rate for 'invscaling'\n \"\"\"\n if self.lr_schedule == \"invscaling\":\n self.learning_rate = (\n float(self.learning_rate_init) / (time_step + 1) ** self.power_t\n )\n\n def trigger_stopping(self, msg, verbose):\n if self.lr_schedule != \"adaptive\":\n if verbose:\n print(msg + \" Stopping.\")\n return True\n\n if self.learning_rate <= 1e-6:\n if verbose:\n print(msg + \" Learning rate too small. Stopping.\")\n return True\n\n self.learning_rate /= 5.0\n if verbose:\n print(msg + \" Setting learning rate to %f\" % self.learning_rate)\n return False\n\n def _get_updates(self, grads):\n \"\"\"Get the values used to update params with given gradients\n\n Parameters\n ----------\n grads : list, length = len(coefs_) + len(intercepts_)\n Containing gradients with respect to coefs_ and intercepts_ in MLP\n model. 
So length should be aligned with params\n\n Returns\n -------\n updates : list, length = len(grads)\n The values to add to params\n \"\"\"\n updates = [\n self.momentum * velocity - self.learning_rate * grad\n for velocity, grad in zip(self.velocities, grads)\n ]\n self.velocities = updates\n\n if self.nesterov:\n updates = [\n self.momentum * velocity - self.learning_rate * grad\n for velocity, grad in zip(self.velocities, grads)\n ]\n\n return updates\n\n\nclass AdamOptimizer(BaseOptimizer):\n \"\"\"Stochastic gradient descent optimizer with Adam\n\n Note: All default values are from the original Adam paper\n\n Parameters\n ----------\n params : list, length = len(coefs_) + len(intercepts_)\n The concatenated list containing coefs_ and intercepts_ in MLP model.\n Used for initializing velocities and updating params\n\n learning_rate_init : float, default=0.001\n The initial learning rate used. It controls the step-size in updating\n the weights\n\n beta_1 : float, default=0.9\n Exponential decay rate for estimates of first moment vector, should be\n in [0, 1)\n\n beta_2 : float, default=0.999\n Exponential decay rate for estimates of second moment vector, should be\n in [0, 1)\n\n epsilon : float, default=1e-8\n Value for numerical stability\n\n Attributes\n ----------\n learning_rate : float\n The current learning rate\n\n t : int\n Timestep\n\n ms : list, length = len(params)\n First moment vectors\n\n vs : list, length = len(params)\n Second moment vectors\n\n References\n ----------\n Kingma, Diederik, and Jimmy Ba.\n \"Adam: A method for stochastic optimization.\"\n arXiv preprint arXiv:1412.6980 (2014).\n \"\"\"\n\n def __init__(\n self, params, learning_rate_init=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8\n ):\n super().__init__(params, learning_rate_init)\n\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.t = 0\n self.ms = [np.zeros_like(param) for param in params]\n self.vs = [np.zeros_like(param) for param in params]\n\n def _get_updates(self, grads):\n \"\"\"Get the values used to update params with given gradients\n\n Parameters\n ----------\n grads : list, length = len(coefs_) + len(intercepts_)\n Containing gradients with respect to coefs_ and intercepts_ in MLP\n model. So length should be aligned with params\n\n Returns\n -------\n updates : list, length = len(grads)\n The values to add to params\n \"\"\"\n self.t += 1\n self.ms = [\n self.beta_1 * m + (1 - self.beta_1) * grad\n for m, grad in zip(self.ms, grads)\n ]\n self.vs = [\n self.beta_2 * v + (1 - self.beta_2) * (grad ** 2)\n for v, grad in zip(self.vs, grads)\n ]\n self.learning_rate = (\n self.learning_rate_init\n * np.sqrt(1 - self.beta_2 ** self.t)\n / (1 - self.beta_1 ** self.t)\n )\n updates = [\n -self.learning_rate * m / (np.sqrt(v) + self.epsilon)\n for m, v in zip(self.ms, self.vs)\n ]\n return updates\n", "\"\"\"\n=================================================\nPixel importances with a parallel forest of trees\n=================================================\n\nThis example shows the use of a forest of trees to evaluate the impurity\nbased importance of the pixels in an image classification task on the faces\ndataset. 
The hotter the pixel, the more important it is.\n\nThe code below also illustrates how the construction and the computation\nof the predictions can be parallelized within multiple jobs.\n\"\"\"\n# %%\nprint(__doc__)\n\n# %%\n# Loading the data and model fitting\n# ----------------------------------\n# First, we load the olivetti faces dataset and limit the dataset to contain\n# only the first five classes. Then we train a random forest on the dataset\n# and evaluate the impurity-based feature importance. One drawback of this\n# method is that it cannot be evaluated on a separate test set. For this\n# example, we are interested in representing the information learned from\n# the full dataset. Also, we'll set the number of cores to use for the tasks.\nfrom sklearn.datasets import fetch_olivetti_faces\n\n# %%\n# We select the number of cores to use to perform parallel fitting of\n# the forest model. `-1` means use all available cores.\nn_jobs = -1\n\n# %%\n# Load the faces dataset\ndata = fetch_olivetti_faces()\nX, y = data.data, data.target\n\n# %%\n# Limit the dataset to 5 classes.\nmask = y < 5\nX = X[mask]\ny = y[mask]\n\n# %%\n# A random forest classifier will be fitted to compute the feature importances.\nfrom sklearn.ensemble import RandomForestClassifier\n\nforest = RandomForestClassifier(\n n_estimators=750, n_jobs=n_jobs, random_state=42)\n\nforest.fit(X, y)\n\n# %%\n# Feature importance based on mean decrease in impurity (MDI)\n# -----------------------------------------------------------\n# Feature importances are provided by the fitted attribute\n# `feature_importances_` and they are computed as the mean and standard\n# deviation of accumulation of the impurity decrease within each tree.\n#\n# .. warning::\n# Impurity-based feature importances can be misleading for high cardinality\n# features (many unique values). See :ref:`permutation_importance` as\n# an alternative.\nimport time\nimport matplotlib.pyplot as plt\n\nstart_time = time.time()\nimg_shape = data.images[0].shape\nimportances = forest.feature_importances_\nelapsed_time = time.time() - start_time\n\nprint(f\"Elapsed time to compute the importances: \"\n f\"{elapsed_time:.3f} seconds\")\nimp_reshaped = importances.reshape(img_shape)\nplt.matshow(imp_reshaped, cmap=plt.cm.hot)\nplt.title(\"Pixel importances using impurity values\")\nplt.colorbar()\nplt.show()\n\n# %%\n# Can you still recognize a face?\n\n# %%\n# The limitations of MDI is not a problem for this dataset because:\n#\n# 1. All features are (ordered) numeric and will thus not suffer the\n# cardinality bias\n# 2. We are only interested to represent knowledge of the forest acquired\n# on the training set.\n#\n# If these two conditions are not met, it is recommended to instead use\n# the :func:`~sklearn.inspection.permutation_importance`.\n", "\"\"\"Test the california_housing loader, if the data is available,\nor if specifically requested via environment variable\n(e.g. 
for travis cron job).\"\"\"\nimport pytest\n\nfrom sklearn.datasets.tests.test_common import check_return_X_y\nfrom functools import partial\n\n\ndef test_fetch(fetch_california_housing_fxt):\n data = fetch_california_housing_fxt()\n assert (20640, 8) == data.data.shape\n assert (20640,) == data.target.shape\n\n # test return_X_y option\n fetch_func = partial(fetch_california_housing_fxt)\n check_return_X_y(data, fetch_func)\n\n\ndef test_fetch_asframe(fetch_california_housing_fxt):\n pd = pytest.importorskip(\"pandas\")\n bunch = fetch_california_housing_fxt(as_frame=True)\n frame = bunch.frame\n assert hasattr(bunch, \"frame\") is True\n assert frame.shape == (20640, 9)\n assert isinstance(bunch.data, pd.DataFrame)\n assert isinstance(bunch.target, pd.Series)\n\n\ndef test_pandas_dependency_message(fetch_california_housing_fxt, hide_available_pandas):\n # Check that pandas is imported lazily and that an informative error\n # message is raised when pandas is missing:\n expected_msg = \"fetch_california_housing with as_frame=True requires pandas\"\n with pytest.raises(ImportError, match=expected_msg):\n fetch_california_housing_fxt(as_frame=True)\n", "\"\"\"Test the covtype loader, if the data is available,\nor if specifically requested via environment variable\n(e.g. for travis cron job).\"\"\"\nfrom functools import partial\nimport pytest\nfrom sklearn.datasets.tests.test_common import check_return_X_y\n\n\ndef test_fetch(fetch_covtype_fxt):\n data1 = fetch_covtype_fxt(shuffle=True, random_state=42)\n data2 = fetch_covtype_fxt(shuffle=True, random_state=37)\n\n X1, X2 = data1[\"data\"], data2[\"data\"]\n assert (581012, 54) == X1.shape\n assert X1.shape == X2.shape\n\n assert X1.sum() == X2.sum()\n\n y1, y2 = data1[\"target\"], data2[\"target\"]\n assert (X1.shape[0],) == y1.shape\n assert (X1.shape[0],) == y2.shape\n\n # test return_X_y option\n fetch_func = partial(fetch_covtype_fxt)\n check_return_X_y(data1, fetch_func)\n\n\ndef test_fetch_asframe(fetch_covtype_fxt):\n pytest.importorskip(\"pandas\")\n\n bunch = fetch_covtype_fxt(as_frame=True)\n assert hasattr(bunch, \"frame\")\n frame = bunch.frame\n assert frame.shape == (581012, 55)\n assert bunch.data.shape == (581012, 54)\n assert bunch.target.shape == (581012,)\n\n column_names = set(frame.columns)\n\n # enumerated names are added correctly\n assert set(f\"Wilderness_Area_{i}\" for i in range(4)) < column_names\n assert set(f\"Soil_Type_{i}\" for i in range(40)) < column_names\n\n\ndef test_pandas_dependency_message(fetch_covtype_fxt, hide_available_pandas):\n expected_msg = \"fetch_covtype with as_frame=True requires pandas\"\n with pytest.raises(ImportError, match=expected_msg):\n fetch_covtype_fxt(as_frame=True)\n", "import itertools\nimport pickle\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\n\nimport pytest\n\nfrom scipy.spatial.distance import cdist\nfrom sklearn.neighbors import DistanceMetric\nfrom sklearn.neighbors import BallTree\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils._testing import create_memmap_backed_data\nfrom sklearn.utils.fixes import sp_version, parse_version\n\n\ndef dist_func(x1, x2, p):\n return np.sum((x1 - x2) ** p) ** (1.0 / p)\n\n\nrng = check_random_state(0)\nd = 4\nn1 = 20\nn2 = 25\nX1 = rng.random_sample((n1, d)).astype(\"float64\", copy=False)\nX2 = rng.random_sample((n2, d)).astype(\"float64\", copy=False)\n\n[X1_mmap, X2_mmap] = create_memmap_backed_data([X1, X2])\n\n# make boolean arrays: ones and zeros\nX1_bool = X1.round(0)\nX2_bool = 
X2.round(0)\n\n[X1_bool_mmap, X2_bool_mmap] = create_memmap_backed_data([X1_bool, X2_bool])\n\n\nV = rng.random_sample((d, d))\nVI = np.dot(V, V.T)\n\nBOOL_METRICS = [\n \"matching\",\n \"jaccard\",\n \"dice\",\n \"kulsinski\",\n \"rogerstanimoto\",\n \"russellrao\",\n \"sokalmichener\",\n \"sokalsneath\",\n]\n\nMETRICS_DEFAULT_PARAMS = {\n \"euclidean\": {},\n \"cityblock\": {},\n \"minkowski\": dict(p=(1, 1.5, 2, 3)),\n \"chebyshev\": {},\n \"seuclidean\": dict(V=(rng.random_sample(d),)),\n \"wminkowski\": dict(p=(1, 1.5, 3), w=(rng.random_sample(d),)),\n \"mahalanobis\": dict(VI=(VI,)),\n \"hamming\": {},\n \"canberra\": {},\n \"braycurtis\": {},\n}\n\n\[email protected](\"metric\", METRICS_DEFAULT_PARAMS)\[email protected](\"X1, X2\", [(X1, X2), (X1_mmap, X2_mmap)])\ndef test_cdist(metric, X1, X2):\n argdict = METRICS_DEFAULT_PARAMS[metric]\n keys = argdict.keys()\n for vals in itertools.product(*argdict.values()):\n kwargs = dict(zip(keys, vals))\n if metric == \"mahalanobis\":\n # See: https://github.com/scipy/scipy/issues/13861\n pytest.xfail(\"scipy#13861: cdist with 'mahalanobis' fails onmemmap data\")\n elif metric == \"wminkowski\":\n if sp_version >= parse_version(\"1.8.0\"):\n pytest.skip(\"wminkowski will be removed in SciPy 1.8.0\")\n\n # wminkoski is deprecated in SciPy 1.6.0 and removed in 1.8.0\n ExceptionToAssert = None\n if sp_version >= parse_version(\"1.6.0\"):\n ExceptionToAssert = DeprecationWarning\n with pytest.warns(ExceptionToAssert):\n D_true = cdist(X1, X2, metric, **kwargs)\n else:\n D_true = cdist(X1, X2, metric, **kwargs)\n\n check_cdist(metric, kwargs, D_true)\n\n\[email protected](\"metric\", BOOL_METRICS)\[email protected](\n \"X1_bool, X2_bool\", [(X1_bool, X2_bool), (X1_bool_mmap, X2_bool_mmap)]\n)\ndef test_cdist_bool_metric(metric, X1_bool, X2_bool):\n D_true = cdist(X1_bool, X2_bool, metric)\n check_cdist_bool(metric, D_true)\n\n\ndef check_cdist(metric, kwargs, D_true):\n dm = DistanceMetric.get_metric(metric, **kwargs)\n D12 = dm.pairwise(X1, X2)\n assert_array_almost_equal(D12, D_true)\n\n\ndef check_cdist_bool(metric, D_true):\n dm = DistanceMetric.get_metric(metric)\n D12 = dm.pairwise(X1_bool, X2_bool)\n assert_array_almost_equal(D12, D_true)\n\n\[email protected](\"metric\", METRICS_DEFAULT_PARAMS)\[email protected](\"X1, X2\", [(X1, X2), (X1_mmap, X2_mmap)])\ndef test_pdist(metric, X1, X2):\n argdict = METRICS_DEFAULT_PARAMS[metric]\n keys = argdict.keys()\n for vals in itertools.product(*argdict.values()):\n kwargs = dict(zip(keys, vals))\n if metric == \"mahalanobis\":\n # See: https://github.com/scipy/scipy/issues/13861\n pytest.xfail(\"scipy#13861: pdist with 'mahalanobis' fails onmemmap data\")\n elif metric == \"wminkowski\":\n if sp_version >= parse_version(\"1.8.0\"):\n pytest.skip(\"wminkowski will be removed in SciPy 1.8.0\")\n\n # wminkoski is deprecated in SciPy 1.6.0 and removed in 1.8.0\n ExceptionToAssert = None\n if sp_version >= parse_version(\"1.6.0\"):\n ExceptionToAssert = DeprecationWarning\n with pytest.warns(ExceptionToAssert):\n D_true = cdist(X1, X1, metric, **kwargs)\n else:\n D_true = cdist(X1, X1, metric, **kwargs)\n\n check_pdist(metric, kwargs, D_true)\n\n\[email protected](\"metric\", BOOL_METRICS)\[email protected](\"X1_bool\", [X1_bool, X1_bool_mmap])\ndef test_pdist_bool_metrics(metric, X1_bool):\n D_true = cdist(X1_bool, X1_bool, metric)\n check_pdist_bool(metric, D_true)\n\n\ndef check_pdist(metric, kwargs, D_true):\n dm = DistanceMetric.get_metric(metric, **kwargs)\n D12 = dm.pairwise(X1)\n 
assert_array_almost_equal(D12, D_true)\n\n\ndef check_pdist_bool(metric, D_true):\n dm = DistanceMetric.get_metric(metric)\n D12 = dm.pairwise(X1_bool)\n # Based on https://github.com/scipy/scipy/pull/7373\n # When comparing two all-zero vectors, scipy>=1.2.0 jaccard metric\n # was changed to return 0, instead of nan.\n if metric == \"jaccard\" and sp_version < parse_version(\"1.2.0\"):\n D_true[np.isnan(D_true)] = 0\n assert_array_almost_equal(D12, D_true)\n\n\[email protected](\"metric\", METRICS_DEFAULT_PARAMS)\ndef test_pickle(metric):\n argdict = METRICS_DEFAULT_PARAMS[metric]\n keys = argdict.keys()\n for vals in itertools.product(*argdict.values()):\n kwargs = dict(zip(keys, vals))\n check_pickle(metric, kwargs)\n\n\[email protected](\"metric\", BOOL_METRICS)\[email protected](\"X1_bool\", [X1_bool, X1_bool_mmap])\ndef test_pickle_bool_metrics(metric, X1_bool):\n dm = DistanceMetric.get_metric(metric)\n D1 = dm.pairwise(X1_bool)\n dm2 = pickle.loads(pickle.dumps(dm))\n D2 = dm2.pairwise(X1_bool)\n assert_array_almost_equal(D1, D2)\n\n\ndef check_pickle(metric, kwargs):\n dm = DistanceMetric.get_metric(metric, **kwargs)\n D1 = dm.pairwise(X1)\n dm2 = pickle.loads(pickle.dumps(dm))\n D2 = dm2.pairwise(X1)\n assert_array_almost_equal(D1, D2)\n\n\ndef test_haversine_metric():\n def haversine_slow(x1, x2):\n return 2 * np.arcsin(\n np.sqrt(\n np.sin(0.5 * (x1[0] - x2[0])) ** 2\n + np.cos(x1[0]) * np.cos(x2[0]) * np.sin(0.5 * (x1[1] - x2[1])) ** 2\n )\n )\n\n X = np.random.random((10, 2))\n\n haversine = DistanceMetric.get_metric(\"haversine\")\n\n D1 = haversine.pairwise(X)\n D2 = np.zeros_like(D1)\n for i, x1 in enumerate(X):\n for j, x2 in enumerate(X):\n D2[i, j] = haversine_slow(x1, x2)\n\n assert_array_almost_equal(D1, D2)\n assert_array_almost_equal(haversine.dist_to_rdist(D1), np.sin(0.5 * D2) ** 2)\n\n\ndef test_pyfunc_metric():\n X = np.random.random((10, 3))\n\n euclidean = DistanceMetric.get_metric(\"euclidean\")\n pyfunc = DistanceMetric.get_metric(\"pyfunc\", func=dist_func, p=2)\n\n # Check if both callable metric and predefined metric initialized\n # DistanceMetric object is picklable\n euclidean_pkl = pickle.loads(pickle.dumps(euclidean))\n pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))\n\n D1 = euclidean.pairwise(X)\n D2 = pyfunc.pairwise(X)\n\n D1_pkl = euclidean_pkl.pairwise(X)\n D2_pkl = pyfunc_pkl.pairwise(X)\n\n assert_array_almost_equal(D1, D2)\n assert_array_almost_equal(D1_pkl, D2_pkl)\n\n\ndef test_bad_pyfunc_metric():\n def wrong_distance(x, y):\n return \"1\"\n\n X = np.ones((5, 2))\n msg = \"Custom distance function must accept two vectors\"\n with pytest.raises(TypeError, match=msg):\n BallTree(X, metric=wrong_distance)\n\n\ndef test_input_data_size():\n # Regression test for #6288\n # Previously, a metric requiring a particular input dimension would fail\n def custom_metric(x, y):\n assert x.shape[0] == 3\n return np.sum((x - y) ** 2)\n\n rng = check_random_state(0)\n X = rng.rand(10, 3)\n\n pyfunc = DistanceMetric.get_metric(\"pyfunc\", func=custom_metric)\n eucl = DistanceMetric.get_metric(\"euclidean\")\n assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X) ** 2)\n" ]
[ [ "numpy.linspace", "numpy.asarray", "sklearn.preprocessing.PolynomialFeatures", "scipy.sparse.random", "sklearn.preprocessing.PolynomialFeatures._combinations", "numpy.hstack", "numpy.ones_like", "numpy.arange", "numpy.sin", "sklearn.preprocessing.SplineTransformer", "numpy.diff", "scipy.sparse.csc_matrix", "scipy.sparse.csr_matrix", "sklearn.utils.fixes.parse_version", "sklearn.utils.fixes.linspace", "numpy.testing.assert_allclose", "numpy.array", "numpy.random.RandomState", "numpy.abs", "numpy.isfortran", "sklearn.preprocessing.KBinsDiscretizer", "numpy.testing.assert_array_equal", "sklearn.utils._testing.assert_array_almost_equal", "sklearn.linear_model.LinearRegression" ], [ "numpy.ma.getdata", "numpy.all", "numpy.ma.getmask", "scipy.sparse.issparse", "numpy.ma.getmaskarray", "numpy.arange", "numpy.flatnonzero", "numpy.full", "numpy.diff", "numpy.repeat", "numpy.zeros", "numpy.logical_not", "scipy.sparse.csc_matrix", "numpy.ma.median", "numpy.ma.mean", "numpy.sum", "scipy.stats.mode", "numpy.setdiff1d", "numpy.ma.masked_array", "numpy.empty" ], [ "sklearn.datasets.make_classification", "sklearn.model_selection.LeavePGroupsOut", "sklearn.model_selection.KFold", "sklearn.model_selection._search_successive_halving._top_k", "numpy.all", "sklearn.model_selection.HalvingRandomSearchCV", "sklearn.svm.LinearSVC", "sklearn.model_selection.ShuffleSplit", "numpy.unique", "sklearn.model_selection.StratifiedKFold", "sklearn.model_selection.LeaveOneGroupOut", "scipy.stats.randint", "sklearn.model_selection.StratifiedShuffleSplit", "sklearn.model_selection.GroupShuffleSplit", "numpy.array", "numpy.random.RandomState", "sklearn.model_selection.GroupKFold", "sklearn.model_selection.HalvingGridSearchCV", "numpy.empty" ], [ "sklearn.model_selection.GridSearchCV", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.linspace", "numpy.abs", "sklearn.datasets.make_moons", "numpy.arange", "matplotlib.pyplot.subplots", "pandas.DataFrame", "sklearn.model_selection.RepeatedStratifiedKFold", "numpy.mean", "sklearn.svm.SVC", "numpy.var", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.zeros_like", "numpy.sqrt" ], [ "matplotlib.pyplot.title", "sklearn.ensemble.RandomForestClassifier", "sklearn.datasets.fetch_olivetti_faces", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.matshow", "matplotlib.pyplot.show" ], [ "sklearn.datasets.tests.test_common.check_return_X_y" ], [ "sklearn.datasets.tests.test_common.check_return_X_y" ], [ "numpy.dot", "sklearn.neighbors.BallTree", "numpy.random.random", "numpy.testing.assert_array_almost_equal", "numpy.sum", "numpy.isnan", "scipy.spatial.distance.cdist", "numpy.cos", "numpy.ones", "numpy.sin", "numpy.zeros_like", "sklearn.utils._testing.create_memmap_backed_data", "sklearn.utils.fixes.parse_version", "sklearn.utils.check_random_state", "sklearn.neighbors.DistanceMetric.get_metric" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
tsoonjin/selam
[ "fbbb355490271bf09056e05b23245be1b75ae24d" ]
[ "selam/prepdata.py" ]
[ "#!/bin/bash\nimport os\nimport sys\nimport random\nimport cv2\nimport numpy as np\nimport xgboost as xgb\n\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA, NMF\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom selam.utils import img\n\n\ndef sample_negative(img, rect, n=1, size=(100, 100)):\n \"\"\" Sample n negative samples randomly\n @param rect: [x1, y1, x2, y2]\n @param n: number of negative samples\n @param size: size of negative window\n \"\"\"\n samples = []\n maxHeight, maxWidth = img.shape[:-1]\n width = abs(rect[0] - rect[2])\n height = abs(rect[1] - rect[3])\n while len(samples) != n:\n tmpX = int(random.random() * (maxWidth - width))\n tmpY = int(random.random() * (maxHeight - height))\n isNotOverlapX = tmpX + width < rect[0] or tmpX > rect[2]\n isNotOverlapY = tmpY + height < rect[1] or tmpY > rect[3]\n # Only accepts sample that does not overlap with ground truth\n if isNotOverlapX and isNotOverlapY:\n samples.append(cv2.resize(\n img[tmpY: tmpY + height, tmpX: tmpX + width], size))\n return samples\n\ndef get_roi(img, rect, size=(100, 100)):\n \"\"\" Return extracted bounding box given 4 corners of a rectangle\n size: size of training image\n @return roi, [x1, y1, x2, y2]\n \"\"\"\n xpos = rect[0::2]\n ypos = rect[1::2]\n y = [int(min(ypos)), int(max(ypos))]\n x = [int(min(xpos)), int(max(xpos))]\n roi = img[y[0]:y[1], x[0]:x[1]]\n return cv2.resize(roi, size), [x[0], y[0], x[1], y[1]]\n\n\ndef get_jpgs(dirpath, skip=0, resize=None):\n \"\"\" Returns all images located in given dirpath\n skip : number of frames skip to reduce computation time\n resize: scale factor for resize\n\n \"\"\"\n filenames = os.listdir(dirpath)\n # Only attempt to parse and sort files that end with .jpg\n filenames = [filename for filename in filenames\n if filename.endswith(\".jpg\") or filename.endswith(\".png\")]\n filenames.sort(key=lambda x: int(x.split('.', 1)[0]))\n frames = [cv2.imread('{}/{}'.format(dirpath, filename))\n for filename in filenames]\n out = frames[0::skip] if skip > 0 else frames\n print('Read {} images from {}'.format(len(out), dirpath))\n if resize:\n new_size = (out[0].shape[1] / resize, out[0].shape[0] / resize)\n return map(lambda x: cv2.resize(x, new_size), out)\n return out\n\n\ndef extract_training(dataset_path, annotation):\n \"\"\" Returns a list of labelled images as positive training data\n Uses default size of 100 x 100 as training patch\n @return positive samples, negative samples\n \"\"\"\n positives = []\n negatives = []\n imgs = get_jpgs(dataset_path)\n with open(annotation) as ann:\n for i, label in zip(imgs, ann):\n rect = map(float, label.rstrip().split(','))\n if rect[0] > 0:\n roi, coord = get_roi(i, rect)\n negatives.extend(sample_negative(i, coord))\n positives.append(roi)\n print(\"{} positive samples\".format(len(positives)))\n print(\"{} negative samples\".format(len(negatives)))\n return positives, negatives\n\n\ndef augment_data(imgs, augment_dir, prefix, n=20):\n \"\"\" Augment imgs with various transformations \n @param augment_dir: directory to save augmented images\n @param prefix: prefix of filename\n @param n: number of transformations per image\n \"\"\"\n n_samples = len(imgs)\n datagen = ImageDataGenerator(\n rotation_range=90,\n 
width_shift_range=0.2,\n height_shift_range=0.2,\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n\n for i in imgs:\n selected = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)\n selected = selected.reshape((1, ) + selected.shape)\n for x, batch in enumerate(datagen.flow(selected, batch_size=1,\n save_to_dir=augment_dir,\n save_prefix=prefix,\n save_format='jpeg')):\n if x > n:\n break\n\n\ndef kfold(x, y, eval_size=0.10):\n \"\"\" Split dataset into training set and validation set\n @param eval_size: percentage of data used for evaluation\n @return X_train, X_valid, Y_train, Y_valid\n \"\"\"\n return train_test_split(x, y, test_size=eval_size, random_state=0)\n\n\ndef std_zscore(X):\n \"\"\" Z-score standardization by subtracting mean and divided by standard\n deviation of dataset\n \"\"\"\n scaler = preprocessing.StandardScaler().fit(X)\n return scaler.transform(X)\n\n\ndef std_minmax(X):\n scaler = preprocessing.MinMaxScaler().fit(X)\n return scaler.transform(X)\n\n\ndef reduce_pca(X, h, w, n=15, display=True):\n \"\"\" Performs PCA decomposition using n components \"\"\"\n pca = PCA(n_components=n, svd_solver='randomized',\n whiten=True).fit(X)\n eigenfaces = pca.components_.reshape((n, h, w, -1))\n if display:\n for i in eigenfaces:\n cv2.imshow('PC', np.uint8(img.normUnity(np.mean(i, axis=2)) * 255))\n cv2.waitKey(0)\n return pca.transform(X)\n\n\ndef reduce_nmf(X, h, w, n=15, display=False):\n \"\"\" Performs Non-negative matrix factorization using n components \"\"\"\n model = NMF(n_components=n, init='random', random_state=0).fit(X)\n components = model.components_.reshape((n, h, w, -1))\n if display:\n for i in components:\n cv2.imshow('PC', np.uint8(img.normUnity(np.mean(i, axis=2)) * 255))\n cv2.waitKey(0)\n return model.transform(X)\n\n\ndef classify_svm(X_train, Y_train):\n param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}\n clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)\n clf.fit(X_train, Y_train)\n return clf\n\n\ndef classify_rf(X_train, Y_train):\n param_grid = {'n_estimators': [50, 200, 700],\n 'max_features': ['auto', 'sqrt', 'log2']}\n clf = GridSearchCV(RandomForestClassifier(n_estimators=500, oob_score=True), param_grid)\n clf.fit(X_train, Y_train)\n return clf\n\n\ndef classify_gp(X, Y):\n # Using same lengthscale for all features\n kernel = 1.0 * RBF([1.0])\n gpc_rbf = GaussianProcessClassifier(kernel=kernel).fit(X, Y)\n return gpc_rbf\n\n\ndef classify_xgb(X, Y):\n xgb_model = xgb.XGBClassifier()\n parameters = {'nthread':[4], #when use hyperthread, xgboost may become slower\n\t\t 'objective':['binary:logistic'],\n\t\t 'learning_rate': [0.05], #so called `eta` value\n\t\t 'max_depth': [6],\n\t\t 'min_child_weight': [11],\n\t\t 'silent': [1],\n\t\t 'subsample': [0.8],\n\t\t 'colsample_bytree': [0.7],\n\t\t 'n_estimators': [5], #number of trees, change it to 1000 for better results\n\t\t 'missing':[-999],\n\t\t 'seed': [1337]}\n clf = GridSearchCV(xgb_model, parameters)\n clf.fit(X, Y)\n return clf\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 4:\n print(\"Usage: python extract_region.py <dataset directory> <annotation file> <prefix> \\n\")\n exit()\n positives, negatives = extract_training(sys.argv[1], sys.argv[2])\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "sklearn.decomposition.NMF", "sklearn.ensemble.RandomForestClassifier", "sklearn.model_selection.train_test_split", "numpy.mean", "sklearn.svm.SVC", "sklearn.gaussian_process.GaussianProcessClassifier", "sklearn.preprocessing.StandardScaler", "sklearn.gaussian_process.kernels.RBF", "sklearn.decomposition.PCA", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
michalnand/reinforcement_learning_agents
[ "45f02c23b1135c87311dce5a52f6e643e4313fc3", "45f02c23b1135c87311dce5a52f6e643e4313fc3", "45f02c23b1135c87311dce5a52f6e643e4313fc3", "45f02c23b1135c87311dce5a52f6e643e4313fc3" ]
[ "RLAgents/lib_common/WrapperSuperMario.py", "RLAgents/lib_agents/AgentPPORND.py", "RLAgents/lib_common/RLStatsCompute.py", "RLAgents/lib_agents/AgentDQNDuel.py" ]
[ "import gym\nimport numpy\nfrom PIL import Image\n\nfrom nes_py.wrappers import JoypadSpace\nfrom gym_super_mario_bros.actions import COMPLEX_MOVEMENT\n\nclass NopOpsEnv(gym.Wrapper):\n def __init__(self, env=None, max_count=30):\n super(NopOpsEnv, self).__init__(env)\n self.max_count = max_count\n\n def reset(self):\n self.env.reset()\n\n noops = numpy.random.randint(1, self.max_count + 1)\n \n for _ in range(noops):\n obs, _, _, _ = self.env.step(0)\n \n return obs\n\n\nclass SkipEnv(gym.Wrapper):\n def __init__(self, env, skip = 4):\n gym.Wrapper.__init__(self, env)\n self._skip = skip\n\n def step(self, action):\n total_reward = 0.0\n done = None\n for i in range(self._skip):\n state, reward, done, info = self.env.step(action)\n total_reward+= reward\n if done:\n break\n\n return state, total_reward, done, info\n\n\nclass ResizeEnv(gym.ObservationWrapper):\n def __init__(self, env, height = 96, width = 96, frame_stacking = 4):\n super(ResizeEnv, self).__init__(env)\n self.height = height\n self.width = width\n self.frame_stacking = frame_stacking\n\n state_shape = (self.frame_stacking, self.height, self.width)\n self.dtype = numpy.float32\n\n self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=state_shape, dtype=self.dtype)\n self.state = numpy.zeros(state_shape, dtype=self.dtype)\n\n def observation(self, state):\n img = Image.fromarray(state)\n img = img.convert('L')\n img = img.resize((self.height, self.width))\n\n for i in reversed(range(self.frame_stacking-1)):\n self.state[i+1] = self.state[i].copy()\n self.state[0] = numpy.array(img).astype(self.dtype)/255.0\n\n return self.state\n\n\n\n\nclass ClipRewardEnv(gym.Wrapper):\n def __init__(self, env, no_rewards = False):\n gym.Wrapper.__init__(self, env)\n\n self.raw_episodes = 0\n self.raw_score = 0.0\n self.raw_score_per_episode = 0.0\n self.raw_score_total = 0.0 \n self.no_rewards = no_rewards\n\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n\n self.raw_score+= reward\n self.raw_score_total+= reward\n\n if done:\n self.raw_episodes+= 1\n k = 0.1\n self.raw_score_per_episode = (1.0 - k)*self.raw_score_per_episode + k*self.raw_score\n self.raw_score = 0.0\n\n reward = reward/15.0\n\n if self.no_rewards:\n reward = 0.0\n\n return obs, reward, done, info\n\n\n\ndef WrapperSuperMario(env, height = 96, width = 96, frame_stacking=4, frame_skipping=4):\n env = JoypadSpace(env, COMPLEX_MOVEMENT)\n \n env = NopOpsEnv(env)\n env = SkipEnv(env, frame_skipping)\n env = ResizeEnv(env, height, width, frame_stacking)\n env = ClipRewardEnv(env, False)\n\n env.reset()\n\n return env\n\ndef WrapperSuperMarioNoRewards(env, height = 96, width = 96, frame_stacking=4, frame_skipping=4):\n env = JoypadSpace(env, COMPLEX_MOVEMENT)\n \n env = NopOpsEnv(env)\n env = SkipEnv(env, frame_skipping)\n env = ResizeEnv(env, height, width, frame_stacking)\n env = ClipRewardEnv(env, True)\n\n env.reset()\n\n return env", "import numpy\nimport torch \nfrom .PolicyBufferIM import * \nfrom .RunningStats import * \n \nclass AgentPPORND(): \n def __init__(self, envs, ModelPPO, ModelRND, config):\n self.envs = envs \n \n self.gamma_ext = config.gamma_ext \n self.gamma_int = config.gamma_int\n \n self.ext_adv_coeff = config.ext_adv_coeff\n self.int_adv_coeff = config.int_adv_coeff\n \n self.entropy_beta = config.entropy_beta\n self.eps_clip = config.eps_clip \n \n self.steps = config.steps\n self.batch_size = config.batch_size \n \n self.training_epochs = config.training_epochs\n self.envs_count = config.envs_count \n\n\n 
self.normalise_state_std = config.normalise_state_std\n self.normalise_im_std = config.normalise_im_std\n\n self.state_shape = self.envs.observation_space.shape\n self.actions_count = self.envs.action_space.n\n\n self.model_ppo = ModelPPO.Model(self.state_shape, self.actions_count)\n self.optimizer_ppo = torch.optim.Adam(self.model_ppo.parameters(), lr=config.learning_rate_ppo)\n \n self.model_rnd = ModelRND.Model(self.state_shape)\n self.optimizer_rnd = torch.optim.Adam(self.model_rnd.parameters(), lr=config.learning_rate_rnd)\n \n self.policy_buffer = PolicyBufferIM(self.steps, self.state_shape, self.actions_count, self.envs_count, self.model_ppo.device, True)\n\n for e in range(self.envs_count):\n self.envs.reset(e)\n \n self.states_running_stats = RunningStats(self.state_shape)\n self.rewards_int_running_stats = RunningStats((1, ))\n\n self._init_running_stats()\n\n #reset envs and fill initial state\n self.states = numpy.zeros((self.envs_count, ) + self.state_shape, dtype=numpy.float32)\n for e in range(self.envs_count):\n self.states[e] = self.envs.reset(e).copy()\n\n \n self.enable_training()\n self.iterations = 0 \n\n self.log_loss_rnd = 0.0\n self.log_loss_actor = 0.0\n self.log_loss_critic = 0.0\n\n self.log_internal_motivation_mean = 0.0\n self.log_internal_motivation_std = 0.0\n \n\n def enable_training(self):\n self.enabled_training = True\n \n def disable_training(self):\n self.enabled_training = False\n\n def main(self): \n #state to tensor\n states_t = torch.tensor(self.states, dtype=torch.float).detach().to(self.model_ppo.device)\n\n #compute model output\n logits_t, values_ext_t, values_int_t = self.model_ppo.forward(states_t)\n \n states_np = states_t.detach().to(\"cpu\").numpy()\n logits_np = logits_t.detach().to(\"cpu\").numpy()\n values_ext_np = values_ext_t.squeeze(1).detach().to(\"cpu\").numpy()\n values_int_np = values_int_t.squeeze(1).detach().to(\"cpu\").numpy()\n\n #collect actions\n actions = self._sample_actions(logits_t)\n \n #execute action\n states, rewards_ext, dones, infos = self.envs.step(actions)\n\n self.states = states.copy()\n \n #update long term states mean and variance\n self.states_running_stats.update(states_np)\n\n #curiosity motivation\n rewards_int = self._curiosity(states_t)\n self.rewards_int_running_stats.update(rewards_int)\n\n #normalise internal motivation\n if self.normalise_im_std:\n rewards_int = rewards_int/self.rewards_int_running_stats.std\n \n rewards_int = numpy.clip(rewards_int, 0.0, 1.0)\n \n #put into policy buffer\n if self.enabled_training:\n self.policy_buffer.add(states_np, logits_np, values_ext_np, values_int_np, actions, rewards_ext, rewards_int, dones)\n\n if self.policy_buffer.is_full():\n self.train()\n \n for e in range(self.envs_count): \n if dones[e]:\n self.states[e] = self.envs.reset(e).copy()\n\n #collect stats\n k = 0.02\n self.log_internal_motivation_mean = (1.0 - k)*self.log_internal_motivation_mean + k*rewards_int.mean()\n self.log_internal_motivation_std = (1.0 - k)*self.log_internal_motivation_std + k*rewards_int.std()\n\n self.iterations+= 1\n return rewards_ext[0], dones[0], infos[0]\n \n def save(self, save_path):\n self.model_ppo.save(save_path + \"trained/\")\n self.model_rnd.save(save_path + \"trained/\")\n\n def load(self, load_path):\n self.model_ppo.load(load_path + \"trained/\")\n self.model_rnd.load(load_path + \"trained/\")\n\n def get_log(self): \n result = \"\" \n\n result+= str(round(self.log_loss_rnd, 7)) + \" \"\n result+= str(round(self.log_loss_actor, 7)) + \" \"\n result+= 
str(round(self.log_loss_critic, 7)) + \" \"\n\n result+= str(round(self.log_internal_motivation_mean, 7)) + \" \"\n result+= str(round(self.log_internal_motivation_std, 7)) + \" \"\n\n return result \n\n '''\n def render(self, env_id, alpha = 0.5):\n size = 256\n state_im = cv2.resize(self.states[env_id][0], (size, size))\n\n states_t = torch.tensor(self.states, dtype=torch.float).detach().to(self.model_ppo.device)\n attention = self.model_ppo.forward_features(states_t).detach().to(\"cpu\").numpy()[0]\n attention_im = cv2.resize(attention, (size, size)) \n \n result_im = numpy.zeros((3, size, size))\n result_im[0] = state_im\n result_im[1] = state_im\n result_im[2] = state_im + 2*attention_im\n result_im = numpy.clip(result_im, 0.0, 1.0)\n\n result_im = numpy.moveaxis(result_im, 0, 2) \n\n #result_vid = (255*result_im).astype(numpy.uint8)\n #self.writer.write(result_vid)\n\n cv2.imshow(\"RND agent\", result_im)\n cv2.waitKey(1)\n '''\n \n\n\n\n def _sample_actions(self, logits):\n action_probs_t = torch.nn.functional.softmax(logits, dim = 1)\n action_distribution_t = torch.distributions.Categorical(action_probs_t)\n action_t = action_distribution_t.sample()\n actions = action_t.detach().to(\"cpu\").numpy()\n return actions\n \n def train(self): \n self.policy_buffer.compute_returns(self.gamma_ext, self.gamma_int)\n\n batch_count = self.steps//self.batch_size\n\n for e in range(self.training_epochs):\n for batch_idx in range(batch_count):\n states, _, logits, actions, returns_ext, returns_int, advantages_ext, advantages_int = self.policy_buffer.sample_batch(self.batch_size, self.model_ppo.device)\n\n #train PPO model\n loss_ppo = self._compute_loss_ppo(states, logits, actions, returns_ext, returns_int, advantages_ext, advantages_int)\n\n self.optimizer_ppo.zero_grad() \n loss_ppo.backward()\n torch.nn.utils.clip_grad_norm_(self.model_ppo.parameters(), max_norm=0.5)\n self.optimizer_ppo.step()\n\n #train RND model, MSE loss\n loss_rnd = self._compute_loss_rnd(states)\n\n self.optimizer_rnd.zero_grad() \n loss_rnd.backward()\n self.optimizer_rnd.step()\n\n k = 0.02\n self.log_loss_rnd = (1.0 - k)*self.log_loss_rnd + k*loss_rnd.detach().to(\"cpu\").numpy()\n\n self.policy_buffer.clear() \n\n \n def _compute_loss_ppo(self, states, logits, actions, returns_ext, returns_int, advantages_ext, advantages_int):\n logits_new, values_ext_new, values_int_new = self.model_ppo.forward(states)\n\n #critic loss\n loss_critic = self._compute_critic_loss(values_ext_new, returns_ext, values_int_new, returns_int)\n\n #actor loss \n advantages = self.ext_adv_coeff*advantages_ext + self.int_adv_coeff*advantages_int\n advantages = advantages.detach() \n loss_policy, loss_entropy = self._compute_actor_loss(logits, logits_new, advantages, actions)\n\n loss_actor = loss_policy + loss_entropy\n \n #total loss\n loss = 0.5*loss_critic + loss_actor\n\n #store to log\n k = 0.02\n self.log_loss_actor = (1.0 - k)*self.log_loss_actor + k*loss_actor.mean().detach().to(\"cpu\").numpy()\n self.log_loss_critic = (1.0 - k)*self.log_loss_critic + k*loss_critic.mean().detach().to(\"cpu\").numpy()\n\n return loss \n\n #MSE critic loss\n def _compute_critic_loss(self, values_ext_new, returns_ext, values_int_new, returns_int):\n ''' \n compute external critic loss, as MSE\n L = (T - V(s))^2\n '''\n values_ext_new = values_ext_new.squeeze(1)\n loss_ext_value = (returns_ext.detach() - values_ext_new)**2\n loss_ext_value = loss_ext_value.mean()\n\n '''\n compute internal critic loss, as MSE\n L = (T - V(s))^2\n '''\n values_int_new = 
values_int_new.squeeze(1)\n loss_int_value = (returns_int.detach() - values_int_new)**2\n loss_int_value = loss_int_value.mean()\n \n loss_critic = loss_ext_value + loss_int_value\n return loss_critic\n\n #PPO actor loss\n def _compute_actor_loss(self, logits, logits_new, advantages, actions):\n log_probs_old = torch.nn.functional.log_softmax(logits, dim = 1).detach()\n\n probs_new = torch.nn.functional.softmax(logits_new, dim = 1)\n log_probs_new = torch.nn.functional.log_softmax(logits_new, dim = 1)\n\n ''' \n compute actor loss, surrogate loss\n '''\n log_probs_new_ = log_probs_new[range(len(log_probs_new)), actions]\n log_probs_old_ = log_probs_old[range(len(log_probs_old)), actions]\n \n ratio = torch.exp(log_probs_new_ - log_probs_old_)\n p1 = ratio*advantages\n p2 = torch.clamp(ratio, 1.0 - self.eps_clip, 1.0 + self.eps_clip)*advantages\n loss_policy = -torch.min(p1, p2) \n loss_policy = loss_policy.mean()\n \n ''' \n compute entropy loss, to avoid greedy strategy\n L = beta*H(pi(s)) = beta*pi(s)*log(pi(s))\n '''\n loss_entropy = (probs_new*log_probs_new).sum(dim = 1)\n loss_entropy = self.entropy_beta*loss_entropy.mean()\n\n return loss_policy, loss_entropy\n\n\n #MSE loss for RND model\n def _compute_loss_rnd(self, states):\n \n state_norm_t = self._norm_state(states).detach()\n \n features_predicted_t, features_target_t = self.model_rnd(state_norm_t)\n\n loss_rnd = (features_target_t - features_predicted_t)**2\n\n #random loss regularisation, 25% non zero for 128envs, 100% non zero for 32envs\n prob = 32.0/self.envs_count\n random_mask = torch.rand(loss_rnd.shape).to(loss_rnd.device)\n random_mask = 1.0*(random_mask < prob) \n loss_rnd = (loss_rnd*random_mask).sum() / (random_mask.sum() + 0.00000001)\n\n return loss_rnd\n \n #compute internal motivation\n def _curiosity(self, state_t):\n state_norm_t = self._norm_state(state_t)\n\n features_predicted_t, features_target_t = self.model_rnd(state_norm_t)\n\n curiosity_t = (features_target_t - features_predicted_t)**2\n curiosity_t = curiosity_t.sum(dim=1)/2.0\n \n return curiosity_t.detach().to(\"cpu\").numpy()\n\n\n #normalise mean and std for state\n def _norm_state(self, state_t):\n mean = torch.from_numpy(self.states_running_stats.mean).to(state_t.device).float()\n std = torch.from_numpy(self.states_running_stats.std).to(state_t.device).float()\n \n state_norm_t = state_t - mean\n\n if self.normalise_state_std:\n state_norm_t = torch.clamp(state_norm_t/std, -5.0, 5.0)\n\n return state_norm_t \n\n #random policy for stats init\n def _init_running_stats(self, steps = 256):\n for _ in range(steps):\n #random action\n actions = numpy.random.randint(0, self.actions_count, (self.envs_count))\n states, _, dones, _ = self.envs.step(actions)\n\n #update stats\n self.states_running_stats.update(states)\n\n for e in range(self.envs_count): \n if dones[e]:\n self.envs.reset(e)\n\n", "import re\nimport numpy\nfrom scipy import stats\nimport json\n\nclass RLStatsCompute:\n def __init__(self, files_list, confidence = 0.95):\n self.data, self.extended = self.load_files(files_list)\n self.mean, self.std, self.lower, self.upper, self.hist = self.compute_stats(self.data, confidence)\n\n def load_files(self, files_list):\n data = []\n extended = []\n \n for f in files_list:\n print(\"loading \", f)\n data_ = numpy.loadtxt(f, unpack = True, comments='{')\n data.append(data_)\n\n extended_f = []\n\n with open(f) as file:\n lines = file.readlines()\n\n for line in lines:\n tmp = \"{\" + line.split('{')[1]\n if len(tmp) > 0:\n try:\n tmp = 
tmp.replace(\"'\", \"\\\"\")\n tmp = json.loads(tmp)\n extended_f.append(tmp)\n except Exception:\n pass\n \n extended.append(extended_f)\n\n data = numpy.array(data)\n \n return data, extended\n \n\n\n def compute_stats(self, data, confidence = 0.95):\n n = data.shape[2]\n\n mean = numpy.mean(data, axis = 0)\n std = numpy.std(data, axis = 0)\n se = stats.sem(data, axis=0)\n h = se * stats.t.ppf((1 + confidence) / 2., n-1)\n\n lower = mean - h\n upper = mean + h\n\n hist = []\n\n for col in range(data.shape[1]):\n h, e = numpy.histogram(data[0][col], bins=64)\n\n e = e[0:-1]\n h = h/numpy.sum(h)\n\n hist.append([e, h])\n\n hist = numpy.array(hist)\n\n return mean, std, lower, upper, hist\n\n", "import numpy\nimport torch\nfrom .ExperienceBuffer import *\n\nimport cv2\n\n\nclass AgentDQNDuel():\n def __init__(self, env, Model, Config):\n self.env = env\n\n config = Config.Config()\n\n self.batch_size = config.batch_size\n self.exploration = config.exploration\n self.gamma = config.gamma\n \n if hasattr(config, \"tau\"):\n self.soft_update = True\n self.tau = config.tau\n elif hasattr(config, \"target_update\"):\n self.soft_update = False\n self.target_update = config.target_update\n else:\n self.soft_update = False\n self.target_update = 10000\n\n if hasattr(config, \"priority_buffer\"):\n self.priority_buffer = True\n else: \n self.priority_buffer = False\n\n self.update_frequency = config.update_frequency \n self.bellman_steps = config.bellman_steps\n \n \n self.state_shape = self.env.observation_space.shape\n self.actions_count = self.env.action_space.n\n\n self.experience_replay = ExperienceBuffer(config.experience_replay_size, self.bellman_steps, self.priority_buffer)\n\n self.model = Model.Model(self.state_shape, self.actions_count)\n self.model_target = Model.Model(self.state_shape, self.actions_count)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr= config.learning_rate)\n\n for target_param, param in zip(self.model_target.parameters(), self.model.parameters()):\n target_param.data.copy_(param.data)\n\n\n self.black_state = env.reset()\n self.white_state = self.black_state.copy()\n\n self.play_as = \"black\"\n\n self.iterations = 0\n\n self.enable_training()\n\n def enable_training(self):\n self.enabled_training = True\n\n def disable_training(self):\n self.enabled_training = False\n\n\n def main(self):\n if self.enabled_training:\n self.exploration.process()\n self.epsilon = self.exploration.get()\n else:\n self.epsilon = self.exploration.get_testing()\n\n active_player = self.env.get_active_player()\n\n if active_player == \"black\":\n self.white_state_next, reward, done, black_action = self.step_eval(self.black_state)\n else:\n self.black_state_next, reward, done, white_action = self.step_eval(self.white_state)\n reward = -1.0*reward\n \n if done:\n if active_player == \"black\":\n self.train_step(self.white_state_next, reward, True, black_action)\n else:\n self.train_step(self.black_state_next, reward, True, white_action)\n\n else:\n if self.play_as == \"black\" and active_player == \"black\":\n self.train_step(self.black_state, reward, False, black_action)\n\n if self.play_as == \"white\" and active_player == \"white\":\n self.train_step(self.white_state, reward, False, white_action)\n\n if active_player == \"black\":\n self.white_state = self.white_state_next.copy()\n else:\n self.black_state = self.black_state_next.copy()\n\n if done:\n #flip players at the game end\n if self.play_as == \"black\":\n self.play_as = \"white\"\n else:\n self.play_as = 
\"black\"\n\n self.black_state = self.env.reset()\n self.white_state = self.black_state.copy()\n\n return reward, done\n\n\n \n\n def step_eval(self, state):\n state_t = torch.from_numpy(state).to(self.model.device).unsqueeze(0).float()\n \n q_values_t = self.model(state_t)\n \n q_values = q_values_t.squeeze(0).to(\"cpu\").detach().numpy()\n\n state, reward, done, _, action = self.env.step_e_greedy(q_values, self.epsilon)\n\n return state, reward, done, action\n\n \n def train_step(self, state, reward, done, action): \n if self.enabled_training:\n self.experience_replay.add(state, action, reward, done)\n\n if self.enabled_training and (self.iterations > self.experience_replay.size):\n if self.iterations%self.update_frequency == 0:\n self.train_model()\n \n if self.soft_update:\n for target_param, param in zip(self.model_target.parameters(), self.model.parameters()):\n target_param.data.copy_((1.0 - self.tau)*target_param.data + self.tau*param.data)\n else:\n if self.iterations%self.target_update == 0:\n self.model_target.load_state_dict(self.model.state_dict())\n\n\n self.iterations+= 1\n \n \n\n def train_model(self):\n state_t, action_t, reward_t, state_next_t, done_t = self.experience_replay.sample(self.batch_size, self.model.device)\n\n #q values, state now, state next\n q_predicted = self.model.forward(state_t)\n q_predicted_next = self.model_target.forward(state_next_t)\n\n #compute target, n-step Q-learning\n q_target = q_predicted.clone()\n for j in range(self.batch_size):\n gamma_ = self.gamma\n\n reward_sum = 0.0\n for i in range(self.bellman_steps):\n if done_t[j][i]:\n gamma_ = 0.0\n reward_sum+= reward_t[j][i]*(gamma_**i)\n\n action_idx = action_t[j]\n q_target[j][action_idx] = reward_sum + (gamma_**self.bellman_steps)*torch.max(q_predicted_next[j])\n \n #train DQN model\n loss_ = ((q_target.detach() - q_predicted)**2)\n loss = loss_.mean() \n\n self.optimizer.zero_grad()\n loss.backward()\n for param in self.model.parameters():\n param.grad.data.clamp_(-10.0, 10.0)\n self.optimizer.step()\n\n loss_ = loss_.mean(dim=1).detach().to(\"cpu\").numpy()\n if self.priority_buffer:\n self.experience_replay.set_loss_for_priority(loss_)\n\n def _sample_action(self, state_t, epsilon):\n\n batch_size = state_t.shape[0]\n\n q_values_t = self.model(state_t).to(\"cpu\")\n\n #best actions indices\n q_max_indices_t = torch.argmax(q_values_t, dim = 1)\n\n #random actions indices\n q_random_indices_t = torch.randint(self.actions_count, (batch_size,))\n\n #create mask, which actions will be from q_random_indices_t and which from q_max_indices_t\n select_random_mask_t= torch.tensor((torch.rand(batch_size) < epsilon).clone(), dtype = int)\n\n #apply mask\n action_idx_t = select_random_mask_t*q_random_indices_t + (1 - select_random_mask_t)*q_max_indices_t\n action_idx_t = torch.tensor(action_idx_t, dtype=int)\n\n #create one hot encoding\n action_one_hot_t = torch.zeros((batch_size, self.actions_count))\n action_one_hot_t[range(batch_size), action_idx_t] = 1.0 \n action_one_hot_t = action_one_hot_t.to(self.model.device)\n\n #numpy result\n action_idx_np = action_idx_t.detach().to(\"cpu\").numpy().astype(dtype=int)\n\n return action_idx_np, action_one_hot_t\n\n def save(self, save_path):\n self.model.save(save_path)\n\n def load(self, save_path):\n self.model.load(save_path)\n \n\n\n\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.random.randint" ], [ "torch.nn.functional.softmax", "torch.nn.functional.log_softmax", "numpy.clip", "torch.min", "torch.from_numpy", "torch.tensor", "torch.exp", "torch.distributions.Categorical", "torch.rand", "torch.clamp", "numpy.zeros", "numpy.random.randint" ], [ "scipy.stats.t.ppf", "numpy.std", "numpy.mean", "scipy.stats.sem", "numpy.array", "numpy.histogram", "numpy.sum", "numpy.loadtxt" ], [ "torch.randint", "torch.max", "torch.zeros", "torch.from_numpy", "torch.tensor", "torch.rand", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yuhaoooo/FaceAdv
[ "73e27b7ca01243a9a3d115f5fabd1008b2afb34a", "73e27b7ca01243a9a3d115f5fabd1008b2afb34a" ]
[ "Finetune/cosface_finetune.py", "Video/main.py" ]
[ "import os\nimport torch\nimport random\nimport numpy as np\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms, datasets\nfrom module.units.cosface_module import CosFace\n\n\ndef accuracy(logits, y):\n _, preds = torch.max(logits, 1)\n return (preds == y).float().mean()\n\n\nif __name__ == \"__main__\":\n\n random.seed(117)\n np.random.seed(117)\n torch.manual_seed(117)\n torch.cuda.manual_seed(117)\n\n transform = transforms.Compose([\n transforms.Resize((112, 96)),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n cosface = CosFace(classnum=156, pretrained=r'..\\Auxiliary\\PretrainedFeatureExtractor\\ACC99.28.pth').to(device)\n\n dataset_dir = r'..\\Auxiliary\\ClippedFaceBank'\n dataset = datasets.ImageFolder(\n dataset_dir, transform=transform)\n len_imgs = int(len(dataset) * 0.2)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [len(dataset) - len_imgs, len_imgs])\n\n batch_size = 32\n workers = 0 if os.name == 'nt' else 8\n epochs = 20\n train_loader = DataLoader(\n train_dataset,\n num_workers=workers,\n batch_size=batch_size,\n shuffle=True\n )\n test_loader = DataLoader(\n test_dataset,\n num_workers=workers,\n batch_size=1,\n shuffle=False\n )\n\n optimizer = optim.Adam(cosface.logits.parameters(), lr=1e-3)\n\n loss_fn = torch.nn.CrossEntropyLoss()\n\n cosface.backbone.eval()\n\n best_acc, best_state_dict = 0., {}\n for epoch in range(epochs):\n print('\\nEpoch {}/{}'.format(epoch + 1, epochs))\n print('-' * 10)\n\n cosface.logits.train()\n loss = 0.0\n acc = 0.0\n for i_batch, (x, y) in enumerate(train_loader):\n x = x.to(device)\n y = y.to(device)\n optimizer.zero_grad()\n y_pred = cosface(x)\n loss_batch = loss_fn(y_pred, y)\n # update\n loss_batch.backward()\n optimizer.step()\n loss += loss_batch.detach().cpu().numpy()\n acc += accuracy(y_pred, y).detach().cpu().numpy()\n loss /= (i_batch + 1)\n acc /= (i_batch + 1)\n print('The train loss is {}, The accuracy is {}'.format(loss, acc))\n\n cosface.logits.eval()\n loss, acc = 0.0, 0.0\n for i_batch, (x, y) in enumerate(test_loader):\n x = x.to(device)\n y = y.to(device)\n y_pred = cosface(x)\n loss_batch = loss_fn(y_pred, y)\n # update\n loss += loss_batch.detach().cpu().numpy()\n acc += accuracy(y_pred, y).detach().cpu().numpy()\n loss /= (i_batch + 1)\n acc /= (i_batch + 1)\n print('The test loss is {}, The accuracy is {}'.format(loss, acc))\n\n if best_acc < acc:\n best_acc = acc\n best_state_dict = cosface.state_dict()\n\n os.makedirs(r'..\\Auxiliary\\PretrainedFaceRecognizer', exist_ok=True)\n torch.save(best_state_dict, r'..\\Auxiliary\\PretrainedFaceRecognizer\\finetuned_cosface.pt')\n", "'''\n Attacking Face Recognition Systems for recording videos\n distance : 30cm, 50cm, 70cm\n illuminance: 50lux, 100lux, 150lux\n pose: HN (head normal), HR (head right), HL (head left), HU (head upper), HB (head bottom)\n\n eyeglasses: AGNs attacking method\n target_class: the victim class or the target class, which deponds on the mode\n'''\n\nimport os\nimport sys\nimport cv2\nimport time\nimport torch\nimport argparse\nimport numpy as np\nfrom mtcnn.mtcnn import MTCNN\nimport torch.nn.functional as F\nfrom PIL import Image, ImageDraw, ImageFont\nfrom module.target import ArcFace, CosFace, FaceNet, VggFace\n\n\ndef main(args):\n parser = argparse.ArgumentParser(description='Recording Attacking Video')\n parser.add_argument('--model', type=str, 
required=True)\n parser.add_argument('--target_class', type=int, required=True)\n parser.add_argument('--mode', type=str, required=True)\n parser.add_argument('--distance', type=int, required=True, help='The distance between user and cameras')\n parser.add_argument('--illuminance', type=int, required=True, help='The illuminance degree')\n parser.add_argument('--pose', type=str, required=True, help='The pose of head')\n parser.add_argument('--transfer', default=False, action='store_true', help='whether to attack the black model')\n args = parser.parse_args(args)\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n\n cam.set(3, 1280)\n cam.set(4, 1024)\n cv2.namedWindow('Attacking Face Recognition System')\n success, frame = cam.read()\n saved_frame = frame.copy()\n\n save_path = r'..\\Outputs\\AttackingVideos\\{}_{}_{}'.format(args.model, args.mode, args.target_class)\n if args.transfer:\n save_path = os.path.join(save_path, 'transfer')\n else:\n save_path = os.path.join(save_path, 'normal')\n subdir = '{}_{}_{}'.format(args.distance, args.illuminance, args.pose)\n save_path = os.path.join(save_path, subdir)\n assert os.path.exists(save_path) is False\n if os.path.exists(save_path) is False:\n os.makedirs(save_path)\n\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n fps = 1\n size = (int(cam.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n annotated_out = cv2.VideoWriter(os.path.join(save_path, 'annotated.mp4'), fourcc, fps, size)\n unannotated_out = cv2.VideoWriter(os.path.join(save_path, 'unannotated.mp4'), fourcc, fps, size)\n\n classnum = 156\n if args.model == 'ArcFace':\n img_size = (112, 112)\n target_model = ArcFace(device, r'..\\Auxiliary\\PretrainedFaceRecognizer\\finetuned_arcface.pt', classnum)\n elif args.model == 'CosFace':\n img_size = (112, 96)\n target_model = CosFace(device, r'..\\Auxiliary\\PretrainedFaceRecognizer\\finetuned_cosface.pt', classnum)\n elif args.model == 'FaceNet':\n img_size = (160, 160)\n target_model = FaceNet(device, r'..\\Auxiliary\\PretrainedFaceRecognizer\\finetuned_facenet.pt', classnum)\n elif args.model == 'VggFace':\n img_size = (224, 224)\n target_model = VggFace(device, r'..\\Auxiliary\\PretrainedFaceRecognizer\\finetuned_vggface.pt', classnum)\n else:\n raise Exception('This model is not supported.')\n\n mtcnn = MTCNN(\n image_size=img_size, margin=0, min_face_size=20,\n thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,\n keep_all=True, device=device\n )\n font = ImageFont.truetype(\"consola.ttf\", 18, encoding=\"unic\")\n\n start_time = time.time()\n cnt = 0\n while success:\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n boxes, _ = mtcnn.detect(frame)\n faces = mtcnn(frame)\n cnt += 1\n if faces is None:\n cv2.imshow('Attacking Face Recognition System', np.asarray(frame)[..., ::-1])\n end_time = time.time()\n if (end_time - start_time) > 20:\n break\n success, frame = cam.read()\n continue\n\n faces = faces.to(device)\n logit = target_model.forward((faces + 1.0) / 2.0)\n\n id = args.target_class\n prob = F.softmax(logit, dim=1).cpu().detach().numpy()[0, id]\n\n frame_draw = frame.copy()\n draw = ImageDraw.Draw(frame_draw)\n for box in boxes:\n draw.text((box.tolist()[0], box.tolist()[1] - 20), 'Id: %d Conf: %.4f' % (id, prob), (255, 0, 0), font=font)\n draw.rectangle(box.tolist(), outline=(255, 0, 0), width=6)\n\n frame_draw = cv2.cvtColor(np.asarray(frame_draw), 
cv2.COLOR_RGB2BGR)\n\n annotated_out.write(frame_draw)\n unannotated_out.write(saved_frame)\n\n cv2.imshow('Attacking Face Recognition System', frame_draw)\n end_time = time.time()\n if (end_time - start_time) > 25:\n break\n success, frame = cam.read()\n saved_frame = frame.copy()\n\n cam.release()\n annotated_out.release()\n unannotated_out.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.cuda.manual_seed", "numpy.random.seed", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.save" ], [ "numpy.asarray", "torch.nn.functional.softmax", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hybug/RL_Lab
[ "0748e143a0fb60b9912ca28fbebc25e8f97a2fe4", "0748e143a0fb60b9912ca28fbebc25e8f97a2fe4" ]
[ "examples/PPO_super_mario_bros/env.py", "examples/PPO_super_mario_bros/policy_graph.py" ]
[ "'''\nAuthor: hanyu\nDate: 2020-11-06 13:04:12\nLastEditTime: 2021-01-09 09:07:08\nLastEditors: hanyu\nDescription: environment\nFilePath: /test_ppo/examples/PPO_super_mario_bros/env.py\n'''\nimport logging\nimport numpy as np\n\nfrom collections import namedtuple\n\n\n# todo, to common\ndef padding(input, seqlen, dtype):\n input = np.array(input, dtype=dtype)\n if len(input) >= seqlen:\n return input\n shape = input.shape\n pad = np.tile(\n np.zeros_like(input[0:1], dtype=dtype),\n [seqlen - shape[0]] + (len(shape) - 1) * [1])\n return np.concatenate([input, pad], axis=0)\n\n\nSeg = namedtuple(\"Seg\", [\"s\", \"a\", \"a_logits\",\n \"r\", \"gaes\", \"v_cur\", \"state_in\"])\n\n\ndef _warp_env():\n import random\n from utils.get_gaes import get_gaes\n import gym_super_mario_bros\n from PIL import Image\n from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT\n from nes_py.wrappers import JoypadSpace\n\n class Env(object):\n \"\"\"\n Raw single environment of game\n \"\"\"\n\n def __init__(self, act_space, act_repeats, frames, state_size, burn_in, seqlen, game):\n '''\n description: init basic params settings.\n param {\n act_space: agent act spaces.\n act_repeats: one a repeats number, default as 1.\n frames: stack of frames for each state.\n state_size: state_size calculated in build_policy_evaluator().\n burn_in: sequences length of each burn-in(dropped) segment.\n seqlen: sequences length of each training segment.\n game: game environment.\n }\n return {None}\n '''\n self.act_space = act_space\n self.act_repeats = act_repeats\n self.act_repeat = random.choice(self.act_repeats)\n self.frames = frames\n self.state_size = state_size\n self.game = game\n self.burn_in = burn_in\n self.seqlen = seqlen\n\n self.max_pos = -10000\n\n self.count = 0\n\n # make gym env from gym_super_mario_bros\n env = gym_super_mario_bros.make(game)\n # warp the raw env through JoypadSpace according act_space\n if self.act_space == 7:\n self.env = JoypadSpace(env, SIMPLE_MOVEMENT)\n elif self.act_space == 12:\n self.env = JoypadSpace(env, COMPLEX_MOVEMENT)\n\n # resize the output image to 84*84 & normalize the pixel\n # input: (240, 256, 3)\n # output: (84, 84, 1)\n s_t = self.resize_image(self.env.reset())\n # expand the state dimension\n # output: (84, 84, frames)\n self.s_t = np.tile(s_t, [1, 1, frames])\n # add the batch_size dimension\n # output: (batch_size, 84, 84, frames)\n self.s = [self.s_t]\n\n # action shape: (batch_size, )\n self.a_t = random.randint(0, act_space - 1)\n self.a = [self.a_t]\n # action logits shape: (batch_size, act_space)\n self.a_logits = []\n self.r = [0]\n self.pos = []\n\n self.v_cur = []\n\n # decides according to build_policy_evaluator()\n state_in = np.zeros(self.state_size, dtype=np.float32)\n # state_in shape: (batch_size, state_in_number)\n self.state_in = [state_in]\n\n self.done = False\n\n def step(self, a, a_logits, v_cur, state_in, force=False):\n '''\n description: step function\n param {\n a: step action\n a_logits: action logits\n v_cur: current value\n state_in: state_in\n force: force flag\n }\n return {\n segs: list of [\"s\", \"a\", \"a_logits\", \"r\", \"gaes\", \"v_cur\", \"state_in\"]\n }\n '''\n # repeat the last action or step the current action\n # according to the act_repeat\n self.count += 1\n if self.count % self.act_repeat == 0:\n self.a_t = a\n self.count = 0\n self.act_repeat = random.choice(self.act_repeats)\n\n # step the action and get the result\n gs_t1, gr_t, gdone, ginfo = self.env.step(self.a_t)\n if not gdone:\n s_t1, 
r_t, done, info = self.env.step(self.a_t)\n r_t += gr_t\n r_t /= 2.\n else:\n s_t1 = gs_t1\n r_t = gr_t\n done = gdone\n info = ginfo\n # reward scaling\n r_t /= 15.\n s_t1 = self.resize_image(s_t1)\n channels = s_t1.shape[-1]\n # concatenate s_t1(the last stacked frame)\n # to self.s_t(drop the first stacked frame)\n self.s_t = np.concatenate(\n [s_t1, self.s_t[:, :, :-channels]], axis=-1)\n\n self.s.append(self.s_t)\n self.a.append(self.a_t)\n self.a_logits.append(a_logits)\n self.r.append(r_t)\n self.max_pos = max(self.max_pos, info[\"x_pos\"])\n self.pos.append(info[\"x_pos\"])\n if (len(self.pos) > 100) and (\n info[\"x_pos\"] - self.pos[-100] < 5) and (\n self.pos[-100] - info[\"x_pos\"] < 5):\n done = True\n self.done = done\n\n self.v_cur.append(v_cur)\n self.state_in.append(state_in)\n\n \"\"\"\n get segs\n \"\"\"\n segs = self.get_history(force)\n\n \"\"\"\n reset env\n \"\"\"\n self.reset(force)\n\n return segs\n\n def reset(self, force=False):\n if self.done or force:\n max_pos = self.max_pos\n self.max_pos = -10000\n print(\" Max Position %s : %d\" % (self.game, max_pos))\n self.count = 0\n self.act_repeat = random.choice(self.act_repeats)\n\n s_t = self.resize_image(self.env.reset())\n\n self.s_t = np.tile(s_t, [1, 1, self.frames])\n self.s = [self.s_t]\n\n self.a_t = random.randint(0, self.act_space - 1)\n self.a = [self.a_t]\n self.a_logits = []\n self.r = [0]\n self.pos = []\n\n self.v_cur = []\n\n state_in = np.zeros(self.state_size, dtype=np.float32)\n self.state_in = [state_in]\n\n self.done = False\n\n def get_state(self):\n return self.s_t\n\n def get_act(self):\n return self.a_t\n\n def get_max_pos(self):\n return self.max_pos\n\n def reset_max_pos(self):\n self.max_pos = -10000\n\n def get_state_in(self):\n return self.state_in[-1]\n\n def get_history(self, force=False):\n if self.done or force:\n if self.done:\n # using Generalized Advantage Estimator estimate Advantage\n gaes, _ = get_gaes(None, self.r, self.v_cur,\n self.v_cur[1:] + [0], 0.99, 0.95)\n seg = Seg(self.s, self.a, self.a_logits, self.r,\n gaes, self.v_cur, self.state_in)\n return self.postprocess(seg)\n if force and len(self.r) > 1:\n gaes, _ = get_gaes(\n None, self.r[:-1], self.v_cur[:-1], self.v_cur[1:], 0.99, 0.95)\n seg = Seg(self.s[:-1], self.a[:-1], self.a_logits[:-1], self.r[:-1], gaes,\n self.v_cur[:-1], self.state_in[:-1])\n return self.postprocess(seg)\n return None\n\n @staticmethod\n def resize_image(image, size=84):\n '''\n description: resize and norm the image\n param {\n image: image of np.array\n size: the size after resize\n }\n return {the image after resize and norm}\n '''\n image = Image.fromarray(image)\n image = image.convert(\"L\")\n image = image.resize((size, size))\n image = np.array(image)\n image = image / 255.\n image = np.array(image, np.float32)\n return image[:, :, None]\n\n def postprocess(self, seg):\n \"\"\"\n postprocess the seg for training,\n split the raw seg into several seqlen segs.\n \"\"\"\n burn_in = self.burn_in\n seqlen = self.seqlen + burn_in\n seg_results = []\n if seg is not None:\n while len(seg[0]) > burn_in:\n next_seg = dict()\n # input: (121(depends on done timing), 84, 84, frames)\n # output: (seqlen, 84, 84, frames)\n next_seg[\"s\"] = padding(seg.s[:seqlen], seqlen, np.float32)\n next_seg[\"a\"] = padding(\n seg.a[1:seqlen + 1], seqlen, np.int32)\n next_seg[\"prev_a\"] = padding(\n seg.a[:seqlen], seqlen, np.int32)\n next_seg[\"a_logits\"] = padding(\n seg.a_logits[:seqlen], seqlen, np.float32)\n next_seg[\"r\"] = padding(\n seg.r[1:seqlen + 
1], seqlen, np.float32)\n next_seg[\"prev_r\"] = padding(\n seg.r[:seqlen], seqlen, np.float32)\n next_seg[\"adv\"] = padding(\n seg.gaes[:seqlen], seqlen, np.float32)\n next_seg[\"v_cur\"] = padding(\n seg.v_cur[:seqlen], seqlen, np.float32)\n next_seg[\"state_in\"] = np.array(\n seg.state_in[0], np.float32)\n next_seg[\"slots\"] = padding(\n len(seg.s[:seqlen]) * [1], seqlen, np.int32)\n\n seg_results.append(next_seg)\n seg = Seg(*[t[burn_in:] for t in seg])\n if any(seg_results):\n # print(\"full use one segs done!\")\n return seg_results\n else:\n return None\n\n class Envs(object):\n def __init__(self, act_space, act_repeats, frames,\n state_size, burn_in, seqlen, games):\n '''\n description: init the environment list \n param {params}\n return {*}\n '''\n self.envs = []\n for game in games:\n env = Env(act_space, act_repeats, frames,\n state_size, burn_in, seqlen, game)\n self.envs.append(env)\n\n def step(self, sess, model):\n '''\n description: step action according to neural network model\n param {\n sess: tensorflow session\n model: the neural network model\n }\n return {the list of Seg}\n '''\n feed_dict = self.get_feed_dict(model)\n\n # get predicted action from model\n a, a_logits, v_cur, state_in = sess.run(\n [model.current_act, model.current_act_logits,\n model.current_value, model.state_out],\n feed_dict=feed_dict\n )\n\n # step the predicted action in turn\n segs = [env.step(\n a[i][0],\n a_logits[i][0],\n v_cur[i][0],\n state_in[i]\n ) for (i, env) in enumerate(self.envs)]\n\n segs = [t2 for t1 in segs if t1 is not None for t2 in t1]\n\n return segs\n\n def get_feed_dict(self, model):\n '''\n description: get the feed_dict of model\n param {*}\n return {*}\n '''\n feed_dict = dict()\n feed_dict[model.s_t] = [[env.get_state()] for env in self.envs]\n feed_dict[model.previous_actions] = [[env.get_act()]\n for env in self.envs]\n feed_dict[model.prev_r] = [[env.r[-1]] for env in self.envs]\n feed_dict[model.state_in] = [env.get_state_in()\n for env in self.envs]\n return feed_dict\n\n return Envs\n\n\ndef build_env(kwargs):\n Envs = _warp_env()\n state_size = kwargs['state_size']\n action_repeats = kwargs['action_repeats']\n frames = kwargs[\"frames\"]\n parallel = kwargs['parallel']\n act_space = kwargs['act_space']\n burn_in = kwargs['burn_in']\n seqlen = kwargs['seqlen']\n\n games = [\"SuperMarioBros-%d-%d-v0\" %\n (i, j) for i in range(1, 9) for j in range(1, 5)]\n games = games * (parallel // len(games))\n\n envs = Envs(act_space, action_repeats, frames,\n state_size, burn_in, seqlen, games)\n\n return envs\n", "'''\nAuthor: hanyu\nDate: 2021-01-06 10:13:41\nLastEditTime: 2021-01-09 09:31:12\nLastEditors: hanyu\nDescription: policy network of PPO\nFilePath: /test_ppo/examples/PPO_super_mario_bros/policy_graph.py\n'''\nfrom ray_helper.miscellaneous import tf_model_ws\n\n\ndef warp_Model():\n '''\n description: warp the policy model\n param {*}\n return {Object: policy model}\n '''\n import tensorflow as tf\n from infer.categorical import categorical\n from utils.get_shape import get_shape\n\n @tf_model_ws\n class Model(object):\n def __init__(self,\n act_space,\n rnn,\n use_rmc,\n use_hrnn,\n use_reward_prediction,\n after_rnn,\n use_pixel_control,\n user_pixel_reconstruction,\n scope='agent',\n **kwargs):\n self.act_space = act_space\n self.use_rmc = use_rmc\n self.use_hrnn = use_hrnn\n self.scope = scope\n\n self.s_t = kwargs.get('s')\n self.prev_actions = kwargs.get('prev_a')\n self.prev_r = kwargs.get('prev_r')\n self.state_in = kwargs.get('state_in')\n\n prev_a = 
tf.one_hot(self.prev_actions,\n depth=act_space, dtype=tf.float32)\n\n # Feature Network\n self.feature, self.cnn_feature, self.image_feature, self.state_out = self.feature_net(\n self.s_t, rnn, prev_a, self.prev_r, self.state_in, scope + '_current_feature')\n\n if use_hrnn:\n # TODO\n pass\n\n # Actor Network\n self.current_act_logits = self.a_net(\n self.feature, scope + '_acurrent')\n self.current_act = tf.squeeze(\n categorical(self.current_act_logits), axis=-1)\n\n # Critic Network\n self.current_value = self.v_net(self.feature, scope + '_vcurrent')\n\n advantage = kwargs.get('adv', None)\n if advantage is not None:\n # Adavantage Normalization\n # adv = (adv - adv_mean)\n # adv = adv / adv_std\n self.old_current_value = kwargs.get('v_cur')\n self.ret = advantage + self.old_current_value\n\n self.a_t = kwargs.get('a')\n self.behavior_logits = kwargs.get('a_logits')\n self.r_t = kwargs.get('r')\n\n self.adv_mean = tf.reduce_mean(advantage, axis=[0, 1])\n advantage -= self.adv_mean\n self.adv_std = tf.math.sqrt(\n tf.reduce_mean(advantage ** 2, axis=[0, 1]))\n self.advantage = advantage / tf.maximum(self.adv_std, 1e-12)\n\n self.slots = tf.cast(kwargs.get('slots'), tf.float32)\n\n if use_reward_prediction:\n # TODO\n # reward prediction network\n pass\n\n if user_pixel_reconstruction:\n # TODO\n # pixerl reconstruction network\n pass\n\n if use_pixel_control:\n # TODO\n # pixel control network\n pass\n\n def get_current_act(self):\n return self.current_act\n\n def get_current_logits(self):\n return self.current_act_logits\n\n def feature_net(self, image, rnn, prev_a, prev_r, state_in, scope='feature'):\n '''\n description: feature-extraction network\n param {\n image: the input image\n rnn: rnn network\n prev_a: previous action\n pre_v: previous value\n state_in: state_in using in rnn\n }\n return {\n Tensor[feature]: the feature input of actor&critic\n Tensor[cnn_feature]: the cnn_feature input of reward prediction net\n Tensor[image_feature]: the image_feature input of coex adm\n Tensor[state_out]: the state_out after feature_net\n }\n '''\n shape = get_shape(image)\n with tf.variable_scope(scope, tf.AUTO_REUSE):\n image = tf.reshape(image, [-1] + shape[-3:])\n filter = [16, 32, 32]\n kernel = [(3, 3), (3, 3), (5, 3)]\n stride = [(1, 2), (1, 2), (2, 1)]\n\n for i in range(len(filter)):\n image = tf.layers.conv2d(\n image,\n filters=filter[i],\n kernel_size=kernel[i][0],\n strides=stride[i][0],\n padding='valid',\n activation=None,\n name=f'conv_{i}'\n )\n image = tf.layers.max_pooling2d(\n image,\n pool_size=kernel[i][1],\n strides=stride[i][1],\n padding='valid',\n name=f'max_pool_{i}'\n )\n image = self.residual_block(image, f'res0_{i}')\n image = tf.nn.relu(image)\n\n new_shape = get_shape(image)\n # the batch_size & seqlen dimensions remain the same\n image_feature = tf.reshape(\n image, [shape[0], shape[1], new_shape[1], new_shape[2], new_shape[3]])\n\n feature = tf.reshape(\n image, [shape[0], shape[1], new_shape[1] * new_shape[2] * new_shape[3]])\n\n cnn_feature = tf.layers.dense(\n feature, 256, tf.nn.relu, name='feature')\n feature = tf.concat(\n [cnn_feature, prev_a, prev_r[:, :, None]], axis=-1)\n\n if self.use_hrnn:\n # TODO\n pass\n elif self.use_rmc:\n # TODO\n pass\n else:\n initial_state = tf.split(state_in, 2, axis=-1)\n feature, c_out, h_out = rnn(\n feature, initial_state=initial_state)\n state_out = tf.concat([c_out, h_out], axis=-1)\n\n return feature, cnn_feature, image_feature, state_out\n\n def a_net(self, feature, scope):\n '''\n description: actor network\n 
param {feature: the output of feature_net}\n return {Tensor: the act_logits tensor}\n '''\n net = feature\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n net = tf.layers.dense(net, get_shape(\n feature)[-1], activation=tf.nn.relu, name='dense')\n act_logits = tf.layers.dense(\n net, self.act_space, activation=None, name='a_logits')\n return act_logits\n\n def v_net(self, feature, scope):\n '''\n description: value network as critic\n param {feature: the output of feature_net}\n return {Tensor: the v_value tensor}\n '''\n net = feature\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n net = tf.layers.dense(\n net,\n get_shape(feature)[-1],\n activation=tf.nn.relu,\n name='dense'\n )\n v_value = tf.squeeze(\n tf.layers.dense(\n net,\n 1,\n activation=None,\n name='v_value'\n ),\n axis=-1\n )\n return v_value\n\n def reconstruct_net(self):\n # TODO\n pass\n\n def control_net(self):\n # TODO\n pass\n\n @staticmethod\n def residual_block(input, scope):\n shape = get_shape(input)\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n last_output = tf.nn.relu(input)\n last_output = tf.layers.conv2d(\n last_output,\n filters=shape[-1],\n kernel_size=3,\n strides=1,\n padding='same',\n activation=None,\n name='conv0'\n )\n last_output = tf.nn.relu(last_output)\n last_output = tf.layers.conv2d(\n last_output,\n filters=shape[-1],\n kernel_size=3,\n strides=1,\n padding='same',\n activation=None,\n name='conv1'\n )\n output = last_output + input\n return output\n\n return Model\n" ]
[ [ "numpy.tile", "numpy.concatenate", "numpy.zeros_like", "numpy.array", "numpy.zeros" ], [ "tensorflow.nn.relu", "tensorflow.layers.conv2d", "tensorflow.concat", "tensorflow.reduce_mean", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.layers.dense", "tensorflow.layers.max_pooling2d", "tensorflow.one_hot", "tensorflow.variable_scope", "tensorflow.split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
monkeypants/CartridgeOCR
[ "a2cdaa72e3839a881118b85f5ff7b4515579004b" ]
[ "src/model/dataProcessing/coco_utils.py" ]
[ "import copy\nimport os\nimport torch\nimport torch.utils.data\nimport torchvision\nfrom pycocotools import mask as coco_mask\nfrom pycocotools.coco import COCO\nimport dataProcessing.transforms as T\nimport logging\n\n\nclass FilterAndRemapCocoCategories(object):\n def __init__(self, categories, remap=True):\n self.categories = categories\n self.remap = remap\n\n def __call__(self, image, target):\n anno = target[\"annotations\"]\n anno = [obj for obj in anno if obj[\"category_id\"] in self.categories]\n if not self.remap:\n target[\"annotations\"] = anno\n return image, target\n anno = copy.deepcopy(anno)\n for obj in anno:\n obj[\"category_id\"] = self.categories.index(obj[\"category_id\"])\n target[\"annotations\"] = anno\n return image, target\n\n\ndef convert_polygons(polygons, height, width):\n max_width = 1080\n if width > max_width:\n logging.warn('invalid width needs normalizing')\n polyout = []\n for p in polygons:\n mult = [width, height] * (len(p) // 2)\n assert(len(mult) == len(p))\n polyout.append([x * y for x, y in zip(p, mult)])\n return polyout\n\n\ndef transform_coco_polygon(segmentations, height, width):\n result = []\n for polygons in segmentations:\n # print('polygons: ',polygons)\n polyout = convert_polygons(polygons, height, width) \n result.append(polyout)\n return result\n\n\ndef convert_coco_poly_to_mask(segmentations, height, width):\n masks = []\n for polygons in segmentations:\n # print('polygons: ',polygons)\n polygons = convert_polygons(polygons, height, width)\n # print('poly2', polygons)\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8)\n mask = mask.any(dim=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, dim=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8)\n return masks\n\n\ndef transform_coco_annotation(anno, height, width):\n anno['segmentation'] = convert_polygons(anno['segmentation'], height, width)\n anno['bbox'] = [x * y for (x, y) in zip(anno['bbox'], [width, height, width, height])]\n for i in range(2, len(anno['bbox'])):\n anno['bbox'][i] += anno['bbox'][i - 2]\n\n\nclass ConvertCocoPolysToMask(object):\n def __call__(self, image, target):\n w, h = image.size\n # print(w,h)\n\n image_id = target[\"image_id\"]\n image_id = torch.tensor([image_id])\n\n anno = target[\"annotations\"]\n \n # TODO: now fixed in the conversion script.\n # for obj in anno:\n # obj['iscrowd']=0\n\n anno = [obj for obj in anno if obj['iscrowd'] == 0]\n\n boxes = [obj[\"bbox\"] for obj in anno]\n # guard against no boxes via resizing\n boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)\n boxes[:, 2:] += boxes[:, :2]\n boxes *= torch.as_tensor([w, h, w, h])\n boxes[:, 0::2].clamp_(min=0, max=w)\n boxes[:, 1::2].clamp_(min=0, max=h)\n\n classes = [obj[\"category_id\"] for obj in anno]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n segmentations = [obj[\"segmentation\"] for obj in anno]\n masks = convert_coco_poly_to_mask(segmentations, h, w)\n\n keypoints = None\n if anno and \"keypoints\" in anno[0]:\n keypoints = [obj[\"keypoints\"] for obj in anno]\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32)\n num_keypoints = keypoints.shape[0]\n if num_keypoints:\n keypoints = keypoints.view(num_keypoints, -1, 3)\n\n keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) \n boxes = boxes[keep]\n classes = classes[keep]\n masks = masks[keep]\n if keypoints 
is not None:\n keypoints = keypoints[keep]\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = classes\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n if keypoints is not None:\n target[\"keypoints\"] = keypoints\n\n # for conversion to coco api\n area = torch.tensor([obj[\"area\"] for obj in anno])\n iscrowd = torch.tensor([obj[\"iscrowd\"] for obj in anno])\n # iscrowd = torch.tensor([0 for obj in anno])\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n return image, target\n\n\ndef _coco_remove_images_without_annotations(dataset, cat_list=None):\n def _has_only_empty_bbox(anno):\n return all(any(o <= 1 for o in obj[\"bbox\"][2:]) for obj in anno)\n\n def _count_visible_keypoints(anno):\n return sum(sum(1 for v in ann[\"keypoints\"][2::3] if v > 0) for ann in anno)\n\n min_keypoints_per_image = 10\n\n def _has_valid_annotation(anno):\n # if it's empty, there is no annotation\n if len(anno) == 0:\n return False\n # if all boxes have close to zero area, there is no annotation\n if _has_only_empty_bbox(anno):\n return False\n # keypoints task have a slight different critera for considering\n # if an annotation is valid\n if \"keypoints\" not in anno[0]:\n return True\n # for keypoint detection tasks, only consider valid images those\n # containing at least min_keypoints_per_image\n if _count_visible_keypoints(anno) >= min_keypoints_per_image:\n return True\n return False\n\n assert isinstance(dataset, torchvision.datasets.CocoDetection)\n ids = []\n for ds_idx, img_id in enumerate(dataset.ids):\n ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = dataset.coco.loadAnns(ann_ids)\n if cat_list:\n anno = [obj for obj in anno if obj[\"category_id\"] in cat_list]\n if _has_valid_annotation(anno):\n ids.append(ds_idx)\n\n dataset = torch.utils.data.Subset(dataset, ids)\n return dataset\n\n\ndef convert_to_coco_api(ds):\n coco_ds = COCO()\n ann_id = 0\n dataset = {'images': [], 'categories': [], 'annotations': []}\n categories = set()\n for img_idx in range(len(ds)):\n # find better way to get target\n # targets = ds.get_annotations(img_idx)\n img, targets = ds[img_idx]\n image_id = targets[\"image_id\"].item()\n img_dict = {}\n img_dict['id'] = image_id\n img_dict['height'] = img.shape[-2]\n img_dict['width'] = img.shape[-1]\n img_dict['image'] = img\n dataset['images'].append(img_dict)\n bboxes = targets[\"boxes\"]\n bboxes[:, 2:] -= bboxes[:, :2]\n bboxes = bboxes.tolist()\n labels = targets['labels'].tolist()\n areas = targets['area'].tolist()\n iscrowd = targets['iscrowd'].tolist()\n if 'masks' in targets:\n masks = targets['masks']\n # make masks Fortran contiguous for coco_mask\n masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)\n if 'keypoints' in targets:\n keypoints = targets['keypoints']\n keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()\n num_objs = len(bboxes)\n for i in range(num_objs):\n ann = {}\n ann['image_id'] = image_id\n ann['bbox'] = bboxes[i]\n ann['category_id'] = labels[i]\n categories.add(labels[i])\n ann['area'] = areas[i]\n ann['iscrowd'] = iscrowd[i]\n ann['id'] = ann_id\n if 'masks' in targets:\n ann[\"segmentation\"] = coco_mask.encode(masks[i].numpy())\n if 'keypoints' in targets:\n ann['keypoints'] = keypoints[i]\n ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])\n dataset['annotations'].append(ann)\n ann_id += 1\n dataset['categories'] = [{'id': i} for i in sorted(categories)]\n coco_ds.dataset = dataset\n coco_ds.createIndex()\n return coco_ds\n\n\ndef 
get_coco_api_from_dataset(dataset):\n for i in range(10):\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n break\n if isinstance(dataset, torch.utils.data.Subset):\n dataset = dataset.dataset\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n return dataset.coco\n return convert_to_coco_api(dataset)\n\n\nclass CocoDetection(torchvision.datasets.CocoDetection):\n def __init__(self, img_folder, ann_file, transforms):\n super(CocoDetection, self).__init__(img_folder, ann_file)\n self._transforms = transforms\n\n def __getitem__(self, idx):\n img, target = super(CocoDetection, self).__getitem__(idx)\n image_id = self.ids[idx]\n # print(image_id)\n target = dict(image_id=image_id, annotations=target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n return img, target\n\n @staticmethod\n def get_coco_api(dataset, transform=False): \n for i in range(10):\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n break\n if isinstance(dataset, torch.utils.data.Subset):\n dataset = dataset.dataset\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n if not transform:\n return dataset.coco\n else:\n return dataset.transform_coco_api()\n raise Exception(\"No instance of CocoDetection found\")\n\n def transform_coco_api(self):\n coco = copy.deepcopy(self.coco)\n\n image_sizes = {}\n for img, target in self:\n image_sizes[target['image_id'].item()] = img.size()[1:] # TODO: width vs height. Always len 3?\n\n for img in coco.dataset['images']:\n (h, w) = image_sizes[img['id']]\n img['width'] = w\n img['height'] = h\n\n for ann in coco.dataset['annotations']:\n id = ann['image_id']\n (h, w) = image_sizes[id]\n transform_coco_annotation(ann, h, w)\n\n coco.createIndex()\n return coco\n\n\ndef get_coco(root, image_set, transforms, mode='instances'):\n anno_file_template = \"{}_{}2017.json\"\n PATHS = {\n \"train\": (\"train2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"train\"))),\n \"val\": (\"val2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"val\"))),\n # \"train\": (\"val2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"val\")))\n }\n\n t = [ConvertCocoPolysToMask()]\n\n if transforms is not None:\n t.append(transforms)\n transforms = T.Compose(t)\n\n img_folder, ann_file = PATHS[image_set]\n img_folder = os.path.join(root, img_folder)\n ann_file = os.path.join(root, ann_file)\n\n dataset = CocoDetection(img_folder, ann_file, transforms=transforms)\n\n if image_set == \"train\":\n dataset = _coco_remove_images_without_annotations(dataset)\n\n # dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])\n\n return dataset\n\n\ndef get_coco_kp(root, image_set, transforms):\n return get_coco(root, image_set, transforms, mode=\"person_keypoints\")\n" ]
[ [ "torch.zeros", "torch.tensor", "torch.as_tensor", "torch.utils.data.Subset", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
crisely09/pyscf
[ "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6", "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6", "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6", "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6", "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6" ]
[ "pyscf/fci/selected_ci.py", "pyscf/pbc/dft/numint.py", "pyscf/dft/test/test_xcfun.py", "pyscf/prop/gtensor/uhf.py", "pyscf/mcscf/addons.py" ]
[ "#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nSelected CI\n\nSimple usage::\n\n >>> from pyscf import gto, scf, ao2mo, fci\n >>> mol = gto.M(atom='C 0 0 0; C 0 0 1')\n >>> mf = scf.RHF(mol).run()\n >>> h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)\n >>> h2 = ao2mo.kernel(mol, mf.mo_coeff)\n >>> e = fci.selected_ci.kernel(h1, h2, mf.mo_coeff.shape[1], mol.nelectron)[0]\n'''\n\nimport ctypes\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf import ao2mo\nfrom pyscf.fci import cistring\nfrom pyscf.fci import direct_spin1\nfrom pyscf.fci import rdm\nfrom pyscf import __config__\n\nlibfci = lib.load_library('libfci')\n\ndef contract_2e(eri, civec_strs, norb, nelec, link_index=None):\n ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)\n if link_index is None:\n link_index = _all_linkstr_index(ci_strs, norb, nelec)\n cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index\n na, nlinka = cd_indexa.shape[:2]\n nb, nlinkb = cd_indexb.shape[:2]\n\n eri = ao2mo.restore(1, eri, norb)\n eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)\n idx,idy = numpy.tril_indices(norb, -1)\n idx = idx * norb + idy\n eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2\n fcivec = ci_coeff.reshape(na,nb)\n # (bb|bb)\n if nelec[1] > 1:\n mb, mlinkb = dd_indexb.shape[:2]\n fcivecT = lib.transpose(fcivec)\n ci1T = numpy.zeros((nb,na))\n libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),\n fcivecT.ctypes.data_as(ctypes.c_void_p),\n ci1T.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(nb), ctypes.c_int(na),\n ctypes.c_int(mb), ctypes.c_int(mlinkb),\n dd_indexb.ctypes.data_as(ctypes.c_void_p))\n ci1 = lib.transpose(ci1T, out=fcivecT)\n else:\n ci1 = numpy.zeros_like(fcivec)\n # (aa|aa)\n if nelec[0] > 1:\n ma, mlinka = dd_indexa.shape[:2]\n libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n ci1.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(ma), ctypes.c_int(mlinka),\n dd_indexa.ctypes.data_as(ctypes.c_void_p))\n\n h_ps = numpy.einsum('pqqs->ps', eri)\n eri1 = eri * 2\n for k in range(norb):\n eri1[:,:,k,k] += h_ps/nelec[0]\n eri1[k,k,:,:] += h_ps/nelec[1]\n eri1 = ao2mo.restore(4, eri1, norb)\n # (bb|aa)\n libfci.SCIcontract_2e_bbaa(eri1.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n ci1.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(nlinka), ctypes.c_int(nlinkb),\n cd_indexa.ctypes.data_as(ctypes.c_void_p),\n cd_indexb.ctypes.data_as(ctypes.c_void_p))\n\n return _as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)\n\ndef select_strs(myci, eri, eri_pq_max, civec_max, strs, norb, nelec):\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nstrs = len(strs)\n nvir = norb - nelec\n 
strs_add = numpy.empty((nstrs*(nelec*nvir)**2//4), dtype=numpy.int64)\n libfci.SCIselect_strs.restype = ctypes.c_int\n nadd = libfci.SCIselect_strs(strs_add.ctypes.data_as(ctypes.c_void_p),\n strs.ctypes.data_as(ctypes.c_void_p),\n eri.ctypes.data_as(ctypes.c_void_p),\n eri_pq_max.ctypes.data_as(ctypes.c_void_p),\n civec_max.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_double(myci.select_cutoff),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs))\n strs_add = sorted(set(strs_add[:nadd]) - set(strs))\n return numpy.asarray(strs_add, dtype=numpy.int64)\n\ndef enlarge_space(myci, civec_strs, eri, norb, nelec):\n if isinstance(civec_strs, (tuple, list)):\n nelec, (strsa, strsb) = _unpack(civec_strs[0], nelec)[1:]\n ci_coeff = lib.asarray(civec_strs)\n else:\n ci_coeff, nelec, (strsa, strsb) = _unpack(civec_strs, nelec)\n na = len(strsa)\n nb = len(strsb)\n ci0 = ci_coeff.reshape(-1,na,nb)\n civec_a_max = lib.norm(ci0, axis=2).max(axis=0)\n civec_b_max = lib.norm(ci0, axis=1).max(axis=0)\n ci_aidx = numpy.where(civec_a_max > myci.ci_coeff_cutoff)[0]\n ci_bidx = numpy.where(civec_b_max > myci.ci_coeff_cutoff)[0]\n civec_a_max = civec_a_max[ci_aidx]\n civec_b_max = civec_b_max[ci_bidx]\n strsa = strsa[ci_aidx]\n strsb = strsb[ci_bidx]\n\n eri = ao2mo.restore(1, eri, norb)\n eri_pq_max = abs(eri.reshape(norb**2,-1)).max(axis=1).reshape(norb,norb)\n\n strsa_add = select_strs(myci, eri, eri_pq_max, civec_a_max, strsa, norb, nelec[0])\n strsb_add = select_strs(myci, eri, eri_pq_max, civec_b_max, strsb, norb, nelec[1])\n strsa = numpy.append(strsa, strsa_add)\n strsb = numpy.append(strsb, strsb_add)\n aidx = numpy.argsort(strsa)\n bidx = numpy.argsort(strsb)\n ci_strs = (strsa[aidx], strsb[bidx])\n aidx = numpy.where(aidx < len(ci_aidx))[0]\n bidx = numpy.where(bidx < len(ci_bidx))[0]\n ma = len(strsa)\n mb = len(strsb)\n\n cs = []\n for i in range(ci0.shape[0]):\n ci1 = numpy.zeros((ma,mb))\n tmp = lib.take_2d(ci0[i], ci_aidx, ci_bidx)\n lib.takebak_2d(ci1, tmp, aidx, bidx)\n cs.append(_as_SCIvector(ci1, ci_strs))\n\n if not isinstance(civec_strs, (tuple, list)) and civec_strs.ndim < 3:\n cs = cs[0]\n return cs\n\ndef cre_des_linkstr(strs, norb, nelec, tril=False):\n '''Given intermediates, the link table to generate input strs\n '''\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nvir = norb - nelec\n nstrs = len(strs)\n link_index = numpy.zeros((nstrs,nelec+nelec*nvir,4), dtype=numpy.int32)\n libfci.SCIcre_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nstrs),\n ctypes.c_int(nelec),\n strs.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(tril))\n return link_index\n\ndef cre_des_linkstr_tril(strs, norb, nelec):\n '''Given intermediates, the link table to generate input strs\n '''\n return cre_des_linkstr(strs, norb, nelec, True)\n\ndef des_des_linkstr(strs, norb, nelec, tril=False):\n '''Given intermediates, the link table to generate input strs\n '''\n if nelec < 2:\n return None\n\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nvir = norb - nelec\n nstrs = len(strs)\n inter1 = numpy.empty((nstrs*nelec), dtype=numpy.int64)\n libfci.SCIdes_uniq_strs.restype = ctypes.c_int\n ninter = libfci.SCIdes_uniq_strs(inter1.ctypes.data_as(ctypes.c_void_p),\n strs.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs))\n inter1 = numpy.asarray(sorted(set(inter1[:ninter])), dtype=numpy.int64)\n ninter = len(inter1)\n\n inter = numpy.empty((ninter*nelec), dtype=numpy.int64)\n ninter = 
libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),\n inter1.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec-1),\n ctypes.c_int(ninter))\n inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)\n ninter = len(inter)\n\n nvir += 2\n link_index = numpy.zeros((ninter,nvir*nvir,4), dtype=numpy.int32)\n libfci.SCIdes_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs), ctypes.c_int(ninter),\n strs.ctypes.data_as(ctypes.c_void_p),\n inter.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(tril))\n return link_index\n\ndef des_des_linkstr_tril(strs, norb, nelec):\n '''Given intermediates, the link table to generate input strs\n '''\n return des_des_linkstr(strs, norb, nelec, True)\n\ndef gen_des_linkstr(strs, norb, nelec):\n '''Given intermediates, the link table to generate input strs\n '''\n if nelec < 1:\n return None\n\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nvir = norb - nelec\n nstrs = len(strs)\n inter = numpy.empty((nstrs*nelec), dtype=numpy.int64)\n libfci.SCIdes_uniq_strs.restype = ctypes.c_int\n ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),\n strs.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs))\n inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)\n ninter = len(inter)\n\n nvir += 1\n link_index = numpy.zeros((ninter,nvir,4), dtype=numpy.int32)\n libfci.SCIdes_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs), ctypes.c_int(ninter),\n strs.ctypes.data_as(ctypes.c_void_p),\n inter.ctypes.data_as(ctypes.c_void_p))\n return link_index\n\ndef gen_cre_linkstr(strs, norb, nelec):\n '''Given intermediates, the link table to generate input strs\n '''\n if nelec == norb:\n return None\n\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nvir = norb - nelec\n nstrs = len(strs)\n inter = numpy.empty((nstrs*nvir), dtype=numpy.int64)\n libfci.SCIcre_uniq_strs.restype = ctypes.c_int\n ninter = libfci.SCIcre_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),\n strs.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs))\n inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)\n ninter = len(inter)\n\n link_index = numpy.zeros((ninter,nelec+1,4), dtype=numpy.int32)\n libfci.SCIcre_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs), ctypes.c_int(ninter),\n strs.ctypes.data_as(ctypes.c_void_p),\n inter.ctypes.data_as(ctypes.c_void_p))\n return link_index\n\n\ndef make_hdiag(h1e, eri, ci_strs, norb, nelec):\n ci_coeff, nelec, ci_strs = _unpack(None, nelec, ci_strs)\n na = len(ci_strs[0])\n nb = len(ci_strs[1])\n hdiag = numpy.empty(na*nb)\n\n h1e = numpy.asarray(h1e, order='C')\n eri = ao2mo.restore(1, eri, norb)\n jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')\n kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')\n c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)\n c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)\n c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)\n occslsta = cistring._strs2occslst(ci_strs[0], norb)\n occslstb = cistring._strs2occslst(ci_strs[1], norb)\n libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),\n c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(nelec[0]), 
ctypes.c_int(nelec[1]),\n occslsta.ctypes.data_as(ctypes.c_void_p),\n occslstb.ctypes.data_as(ctypes.c_void_p))\n return hdiag\n\ndef kernel_fixed_space(myci, h1e, eri, norb, nelec, ci_strs, ci0=None,\n tol=None, lindep=None, max_cycle=None, max_space=None,\n nroots=None, davidson_only=None,\n max_memory=None, verbose=None, ecore=0, **kwargs):\n log = logger.new_logger(myci, verbose)\n if tol is None: tol = myci.conv_tol\n if lindep is None: lindep = myci.lindep\n if max_cycle is None: max_cycle = myci.max_cycle\n if max_space is None: max_space = myci.max_space\n if max_memory is None: max_memory = myci.max_memory\n if nroots is None: nroots = myci.nroots\n if myci.verbose >= logger.WARN:\n myci.check_sanity()\n\n nelec = direct_spin1._unpack_nelec(nelec, myci.spin)\n ci0, nelec, ci_strs = _unpack(ci0, nelec, ci_strs)\n na = len(ci_strs[0])\n nb = len(ci_strs[1])\n h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)\n h2e = ao2mo.restore(1, h2e, norb)\n\n link_index = _all_linkstr_index(ci_strs, norb, nelec)\n hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)\n\n if isinstance(ci0, _SCIvector):\n if ci0.size == na*nb:\n ci0 = [ci0.ravel()]\n else:\n ci0 = [x.ravel() for x in ci0]\n else:\n ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)\n\n def hop(c):\n hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)\n return hc.reshape(-1)\n precond = lambda x, e, *args: x/(hdiag-e+1e-4)\n\n #e, c = lib.davidson(hop, ci0, precond, tol=myci.conv_tol)\n e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,\n max_cycle=max_cycle, max_space=max_space, nroots=nroots,\n max_memory=max_memory, verbose=log, **kwargs)\n if nroots > 1:\n return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]\n else:\n return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)\n\n\ndef kernel_float_space(myci, h1e, eri, norb, nelec, ci0=None,\n tol=None, lindep=None, max_cycle=None, max_space=None,\n nroots=None, davidson_only=None,\n max_memory=None, verbose=None, ecore=0, **kwargs):\n log = logger.new_logger(myci, verbose)\n if tol is None: tol = myci.conv_tol\n if lindep is None: lindep = myci.lindep\n if max_cycle is None: max_cycle = myci.max_cycle\n if max_space is None: max_space = myci.max_space\n if max_memory is None: max_memory = myci.max_memory\n if nroots is None: nroots = myci.nroots\n if myci.verbose >= logger.WARN:\n myci.check_sanity()\n\n nelec = direct_spin1._unpack_nelec(nelec, myci.spin)\n h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)\n h2e = ao2mo.restore(1, h2e, norb)\n\n# TODO: initial guess from CISD\n if isinstance(ci0, _SCIvector):\n if ci0.size == len(ci0._strs[0])*len(ci0._strs[1]):\n ci0 = [ci0.ravel()]\n else:\n ci0 = [x.ravel() for x in ci0]\n else:\n ci_strs = (numpy.asarray([int('1'*nelec[0], 2)]),\n numpy.asarray([int('1'*nelec[1], 2)]))\n ci0 = _as_SCIvector(numpy.ones((1,1)), ci_strs)\n ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)\n if ci0.size < nroots:\n log.warn('''\n Selected-CI space generated from HF ground state (by double exciting) is not enough for excited states.\n HOMO->LUMO excitations are included in the initial guess.\n NOTE: This may introduce excited states of different symmetry.\\n''')\n corea = '1' * (nelec[0]-1)\n coreb = '1' * (nelec[1]-1)\n ci_strs = (numpy.asarray([int('1'+corea, 2), int('10'+corea, 2)]),\n numpy.asarray([int('1'+coreb, 2), int('10'+coreb, 2)]))\n ci0 = _as_SCIvector(numpy.ones((2,2)), ci_strs)\n ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)\n if ci0.size < 
nroots:\n raise RuntimeError('Not enough selected-CI space for %d states' % nroots)\n ci_strs = ci0._strs\n hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)\n ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)\n\n def hop(c):\n hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)\n return hc.ravel()\n precond = lambda x, e, *args: x/(hdiag-e+myci.level_shift)\n\n namax = cistring.num_strings(norb, nelec[0])\n nbmax = cistring.num_strings(norb, nelec[1])\n e_last = 0\n float_tol = myci.start_tol\n tol_decay_rate = myci.tol_decay_rate\n conv = False\n for icycle in range(norb):\n ci_strs = ci0[0]._strs\n float_tol = max(float_tol*tol_decay_rate, tol*1e2)\n log.debug('cycle %d ci.shape %s float_tol %g',\n icycle, (len(ci_strs[0]), len(ci_strs[1])), float_tol)\n\n ci0 = [c.ravel() for c in ci0]\n link_index = _all_linkstr_index(ci_strs, norb, nelec)\n hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)\n #e, ci0 = lib.davidson(hop, ci0.reshape(-1), precond, tol=float_tol)\n e, ci0 = myci.eig(hop, ci0, precond, tol=float_tol, lindep=lindep,\n max_cycle=max_cycle, max_space=max_space, nroots=nroots,\n max_memory=max_memory, verbose=log, **kwargs)\n if nroots > 1:\n ci0 = [_as_SCIvector(c, ci_strs) for c in ci0]\n de, e_last = min(e)-e_last, min(e)\n log.info('cycle %d E = %s dE = %.8g', icycle, e+ecore, de)\n else:\n ci0 = [_as_SCIvector(ci0, ci_strs)]\n de, e_last = e-e_last, e\n log.info('cycle %d E = %.15g dE = %.8g', icycle, e+ecore, de)\n\n if ci0[0].shape == (namax,nbmax) or abs(de) < tol*1e3:\n conv = True\n break\n\n last_ci0_size = float(len(ci_strs[0])), float(len(ci_strs[1]))\n ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)\n na = len(ci0[0]._strs[0])\n nb = len(ci0[0]._strs[1])\n if ((.99 < na/last_ci0_size[0] < 1.01) and\n (.99 < nb/last_ci0_size[1] < 1.01)):\n conv = True\n break\n\n ci_strs = ci0[0]._strs\n log.debug('Extra CI in selected space %s', (len(ci_strs[0]), len(ci_strs[1])))\n ci0 = [c.ravel() for c in ci0]\n link_index = _all_linkstr_index(ci_strs, norb, nelec)\n hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)\n e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,\n max_cycle=max_cycle, max_space=max_space, nroots=nroots,\n max_memory=max_memory, verbose=log, **kwargs)\n\n na = len(ci_strs[0])\n nb = len(ci_strs[1])\n if nroots > 1:\n for i, ei in enumerate(e+ecore):\n log.info('Selected CI state %d E = %.15g', i, ei)\n return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]\n else:\n log.info('Selected CI E = %.15g', e+ecore)\n return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)\n\ndef kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,\n lindep=1e-14, max_cycle=50, max_space=12, nroots=1,\n davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,\n select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):\n return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,\n level_shift, tol, lindep, max_cycle,\n max_space, nroots, davidson_only,\n pspace_size, select_cutoff=select_cutoff,\n ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,\n **kwargs)\n\ndef make_rdm1s(civec_strs, norb, nelec, link_index=None):\n '''Spin separated 1-particle density matrices.\n The return values include two density matrices: (alpha,alpha), (beta,beta)\n\n dm1[p,q] = <q^\\dagger p>\n\n The convention is based on McWeeney's book, Eq (5.4.20).\n The contraction between 1-particle Hamiltonian and rdm1 is\n E = einsum('pq,qp', h1, rdm1)\n '''\n ci_coeff, nelec, ci_strs = 
_unpack(civec_strs, nelec)\n if link_index is None:\n cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])\n cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])\n else:\n cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index\n rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', ci_coeff, ci_coeff,\n norb, nelec, (cd_indexa,cd_indexb))\n rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', ci_coeff, ci_coeff,\n norb, nelec, (cd_indexa,cd_indexb))\n return rdm1a, rdm1b\n\ndef make_rdm1(civec_strs, norb, nelec, link_index=None):\n r'''Spin-traced 1-particle density matrix.\n\n dm1[p,q] = <q_alpha^\\dagger p_alpha> + <q_beta^\\dagger p_beta>\n\n The convention is based on McWeeney's book, Eq (5.4.20)\n The contraction between 1-particle Hamiltonian and rdm1 is\n E = einsum('pq,qp', h1, rdm1)\n '''\n rdm1a, rdm1b = make_rdm1s(civec_strs, norb, nelec, link_index)\n return rdm1a + rdm1b\n\n# dm[p,q,r,s] = <|p^+ q r^+ s|>\ndef make_rdm2s(civec_strs, norb, nelec, link_index=None, **kwargs):\n r'''Spin separated 2-particle density matrices.\n The return values include three density matrices:\n (alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta), (beta,beta,beta,beta)\n\n 2pdm[p,q,r,s] = :math:`\\langle p^\\dagger r^\\dagger s q\\rangle`\n '''\n ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)\n if link_index is None:\n cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])\n dd_indexa = des_des_linkstr(ci_strs[0], norb, nelec[0])\n cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])\n dd_indexb = des_des_linkstr(ci_strs[1], norb, nelec[1])\n else:\n cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index\n na, nlinka = cd_indexa.shape[:2]\n nb, nlinkb = cd_indexb.shape[:2]\n\n fcivec = ci_coeff.reshape(na,nb)\n # (bb|aa) and (aa|bb)\n dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,\n norb, nelec, (cd_indexa,cd_indexb), 0)[1]\n # (aa|aa)\n dm2aa = numpy.zeros([norb]*4)\n if nelec[0] > 1:\n ma, mlinka = dd_indexa.shape[:2]\n libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,\n dm2aa.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(ma), ctypes.c_int(mlinka),\n dd_indexa.ctypes.data_as(ctypes.c_void_p))\n # (bb|bb)\n dm2bb = numpy.zeros([norb]*4)\n if nelec[1] > 1:\n mb, mlinkb = dd_indexb.shape[:2]\n fcivecT = lib.transpose(fcivec)\n libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,\n dm2bb.ctypes.data_as(ctypes.c_void_p),\n fcivecT.ctypes.data_as(ctypes.c_void_p),\n fcivecT.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(nb), ctypes.c_int(na),\n ctypes.c_int(mb), ctypes.c_int(mlinkb),\n dd_indexb.ctypes.data_as(ctypes.c_void_p))\n return dm2aa, dm2ab, dm2bb\n\ndef make_rdm2(civec_strs, norb, nelec, link_index=None, **kwargs):\n r'''Spin-traced two-particle density matrix.\n\n 2pdm[p,q,r,s] = :math:`\\langle p_\\alpha^\\dagger r_\\alpha^\\dagger s_\\alpha q_\\alpha\\rangle +\n \\langle p_\\beta^\\dagger r_\\alpha^\\dagger s_\\alpha q_\\beta\\rangle +\n \\langle p_\\alpha^\\dagger r_\\beta^\\dagger s_\\beta q_\\alpha\\rangle +\n \\langle p_\\beta^\\dagger r_\\beta^\\dagger s_\\beta q_\\beta\\rangle`.\n '''\n dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)\n dm2aa += dm2bb\n dm2aa += dm2ab\n dm2aa += dm2ab.transpose(2,3,0,1)\n return dm2aa\n\ndef trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index=None):\n r'''Spin separated transition 1-particle density matrices.\n See also function 
:func:`make_rdm1s`\n\n 1pdm[p,q] = :math:`\\langle q^\\dagger p \\rangle`\n '''\n cibra, nelec, ci_strs = _unpack(cibra_strs, nelec)\n ciket, nelec1, ci_strs1 = _unpack(ciket_strs, nelec)\n assert(all(ci_strs[0] == ci_strs1[0]) and\n all(ci_strs[1] == ci_strs1[1]))\n if link_index is None:\n cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])\n cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])\n else:\n cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index\n rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,\n norb, nelec, (cd_indexa,cd_indexb))\n rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,\n norb, nelec, (cd_indexa,cd_indexb))\n return rdm1a, rdm1b\n\ndef trans_rdm1(cibra_strs, ciket_strs, norb, nelec, link_index=None):\n r'''Spin traced transition 1-particle density matrices.\n See also function :func:`make_rdm1`\n\n 1pdm[p,q] = :math:`\\langle q_\\alpha^\\dagger p_\\alpha \\rangle\n + \\langle q_\\beta^\\dagger p_\\beta \\rangle`\n '''\n rdm1a, rdm1b = trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index)\n return rdm1a + rdm1b\n\ndef spin_square(civec_strs, norb, nelec):\n '''Spin square for RHF-FCI CI wfn only (obtained from spin-degenerated\n Hamiltonian)'''\n ci1 = contract_ss(civec_strs, norb, nelec)\n\n ss = numpy.einsum('ij,ij->', civec_strs.reshape(ci1.shape), ci1)\n s = numpy.sqrt(ss+.25) - .5\n multip = s*2+1\n return ss, multip\n\ndef contract_ss(civec_strs, norb, nelec):\n r''' S^2 |\\Psi\\rangle\n '''\n ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)\n strsa, strsb = ci_strs\n neleca, nelecb = nelec\n ci_coeff = ci_coeff.reshape(len(strsa),len(strsb))\n\n def gen_map(fstr_index, strs, nelec, des=True):\n a_index = fstr_index(strs, norb, nelec)\n amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)\n if des:\n for k, tab in enumerate(a_index):\n sign = tab[:,3]\n tab = tab[sign!=0]\n amap[k,tab[:,1]] = tab[:,2:]\n else:\n for k, tab in enumerate(a_index):\n sign = tab[:,3]\n tab = tab[sign!=0]\n amap[k,tab[:,0]] = tab[:,2:]\n return amap\n\n if neleca > 0:\n ades = gen_map(gen_des_linkstr, strsa, neleca)\n else:\n ades = None\n\n if nelecb > 0:\n bdes = gen_map(gen_des_linkstr, strsb, nelecb)\n else:\n bdes = None\n\n if neleca < norb:\n acre = gen_map(gen_cre_linkstr, strsa, neleca, False)\n else:\n acre = None\n\n if nelecb < norb:\n bcre = gen_map(gen_cre_linkstr, strsb, nelecb, False)\n else:\n bcre = None\n\n def trans(ci1, aindex, bindex):\n if aindex is None or bindex is None:\n return None\n\n ma = len(aindex)\n mb = len(bindex)\n t1 = numpy.zeros((ma,mb))\n for i in range(norb):\n signa = aindex[:,i,1]\n signb = bindex[:,i,1]\n maska = numpy.where(signa!=0)[0]\n maskb = numpy.where(signb!=0)[0]\n addra = aindex[maska,i,0]\n addrb = bindex[maskb,i,0]\n citmp = lib.take_2d(ci_coeff, addra, addrb)\n citmp *= signa[maska].reshape(-1,1)\n citmp *= signb[maskb]\n #: t1[addra.reshape(-1,1),addrb] += citmp\n lib.takebak_2d(t1, citmp, maska, maskb)\n for i in range(norb):\n signa = aindex[:,i,1]\n signb = bindex[:,i,1]\n maska = numpy.where(signa!=0)[0]\n maskb = numpy.where(signb!=0)[0]\n addra = aindex[maska,i,0]\n addrb = bindex[maskb,i,0]\n citmp = lib.take_2d(t1, maska, maskb)\n citmp *= signa[maska].reshape(-1,1)\n citmp *= signb[maskb]\n #: ci1[maska.reshape(-1,1), maskb] += citmp\n lib.takebak_2d(ci1, citmp, addra, addrb)\n\n ci1 = numpy.zeros_like(ci_coeff)\n trans(ci1, ades, bcre) # S+*S-\n trans(ci1, acre, bdes) # S-*S+\n ci1 *= .5\n ci1 += (neleca-nelecb)**2*.25*ci_coeff\n return _as_SCIvector(ci1, 
ci_strs)\n\ndef to_fci(civec_strs, norb, nelec):\n ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)\n addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]\n addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]\n na = cistring.num_strings(norb, nelec[0])\n nb = cistring.num_strings(norb, nelec[1])\n ci0 = numpy.zeros((na,nb))\n lib.takebak_2d(ci0, ci_coeff, addrsa, addrsb)\n return ci0\n\ndef from_fci(fcivec, ci_strs, norb, nelec):\n fcivec, nelec, ci_strs = _unpack(fcivec, nelec, ci_strs)\n addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]\n addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]\n na = cistring.num_strings(norb, nelec[0])\n nb = cistring.num_strings(norb, nelec[1])\n fcivec = fcivec.reshape(na,nb)\n civec = lib.take_2d(fcivec, addrsa, addrsb)\n return _as_SCIvector(civec, ci_strs)\n\n\nclass SelectedCI(direct_spin1.FCISolver):\n\n ci_coeff_cutoff = getattr(__config__, 'fci_selected_ci_SCI_ci_coeff_cutoff', .5e-3)\n select_cutoff = getattr(__config__, 'fci_selected_ci_SCI_select_cutoff', .5e-3)\n conv_tol = getattr(__config__, 'fci_selected_ci_SCI_conv_tol', 1e-9)\n start_tol = getattr(__config__, 'fci_selected_ci_SCI_start_tol', 3e-4)\n tol_decay_rate = getattr(__config__, 'fci_selected_ci_SCI_tol_decay_rate', 0.3)\n\n def __init__(self, mol=None):\n direct_spin1.FCISolver.__init__(self, mol)\n\n##################################################\n# don't modify the following attributes, they are not input options\n #self.converged = False\n #self.ci = None\n self._strs = None\n keys = set(('ci_coeff_cutoff', 'select_cutoff', 'conv_tol',\n 'start_tol', 'tol_decay_rate'))\n self._keys = self._keys.union(keys)\n\n def dump_flags(self, verbose=None):\n direct_spin1.FCISolver.dump_flags(self, verbose)\n logger.info(self, 'ci_coeff_cutoff %g', self.ci_coeff_cutoff)\n logger.info(self, 'select_cutoff %g', self.select_cutoff)\n\n def contract_2e(self, eri, civec_strs, norb, nelec, link_index=None, **kwargs):\n# The argument civec_strs is a CI vector in function FCISolver.contract_2e.\n# Save and patch self._strs to make this contract_2e function compatible to\n# FCISolver.contract_2e.\n if getattr(civec_strs, '_strs', None) is not None:\n self._strs = civec_strs._strs\n else:\n assert(civec_strs.size == len(self._strs[0])*len(self._strs[1]))\n civec_strs = _as_SCIvector(civec_strs, self._strs)\n return contract_2e(eri, civec_strs, norb, nelec, link_index)\n\n def get_init_guess(self, ci_strs, norb, nelec, nroots, hdiag):\n '''Initial guess is the single Slater determinant\n '''\n na = len(ci_strs[0])\n nb = len(ci_strs[1])\n ci0 = direct_spin1._get_init_guess(na, nb, nroots, hdiag)\n return [_as_SCIvector(x, ci_strs) for x in ci0]\n\n def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):\n return make_hdiag(h1e, eri, ci_strs, norb, nelec)\n\n enlarge_space = enlarge_space\n kernel = kernel_float_space\n kernel_fixed_space = kernel_fixed_space\n\n# def approx_kernel(self, h1e, eri, norb, nelec, ci0=None, link_index=None,\n# tol=None, lindep=None, max_cycle=None,\n# max_memory=None, verbose=None, **kwargs):\n# ci_strs = getattr(ci0, '_strs', self._strs)\n# return self.kernel_fixed_space(h1e, eri, norb, nelec, ci_strs,\n# ci0, link_index, tol, lindep, 6,\n# max_memory, verbose, **kwargs)\n\n @lib.with_doc(spin_square.__doc__)\n def spin_square(self, civec_strs, norb, nelec):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n return spin_square(_as_SCIvector_if_not(civec_strs, self._strs), norb, nelec)\n\n def 
large_ci(self, civec_strs, norb, nelec, tol=.1, return_strs=True):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n ci, _, (strsa, strsb) = _unpack(civec_strs, nelec, self._strs)\n addra, addrb = numpy.where(abs(ci) > tol)\n if return_strs:\n strsa = [bin(x) for x in strsa[addra]]\n strsb = [bin(x) for x in strsb[addrb]]\n return list(zip(ci[addra,addrb], strsa, strsb))\n else:\n occslsta = cistring._strs2occslst(strsa[addra], norb)\n occslstb = cistring._strs2occslst(strsb[addrb], norb)\n return list(zip(ci[addra,addrb], occslsta, occslstb))\n\n def contract_ss(self, fcivec, norb, nelec):\n return contract_ss(fcivec, norb, nelec)\n\n @lib.with_doc(make_rdm1s.__doc__)\n def make_rdm1s(self, civec_strs, norb, nelec, link_index=None):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n return make_rdm1s(civec_strs, norb, nelec, link_index)\n\n @lib.with_doc(make_rdm1.__doc__)\n def make_rdm1(self, civec_strs, norb, nelec, link_index=None):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n rdm1a, rdm1b = self.make_rdm1s(civec_strs, norb, nelec, link_index)\n return rdm1a + rdm1b\n\n @lib.with_doc(make_rdm2s.__doc__)\n def make_rdm2s(self, civec_strs, norb, nelec, link_index=None, **kwargs):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n return make_rdm2s(civec_strs, norb, nelec, link_index)\n\n @lib.with_doc(make_rdm2.__doc__)\n def make_rdm2(self, civec_strs, norb, nelec, link_index=None, **kwargs):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n return make_rdm2(civec_strs, norb, nelec, link_index)\n\n def make_rdm12s(self, civec_strs, norb, nelec, link_index=None, **kwargs):\n neleca, nelecb = nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)\n if neleca > 1 and nelecb > 1:\n dm1a = numpy.einsum('iikl->kl', dm2aa) / (neleca-1)\n dm1b = numpy.einsum('iikl->kl', dm2bb) / (nelecb-1)\n else:\n dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)\n return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)\n\n def make_rdm12(self, civec_strs, norb, nelec, link_index=None, **kwargs):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n nelec_tot = sum(nelec)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n dm2 = make_rdm2(civec_strs, norb, nelec, link_index)\n if nelec_tot > 1:\n dm1 = numpy.einsum('iikl->kl', dm2) / (nelec_tot-1)\n else:\n dm1 = make_rdm1(civec_strs, norb, nelec, link_index)\n return dm1, dm2\n\n @lib.with_doc(trans_rdm1s.__doc__)\n def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n cibra = _as_SCIvector_if_not(cibra, self._strs)\n ciket = _as_SCIvector_if_not(ciket, self._strs)\n return trans_rdm1s(cibra, ciket, norb, nelec, link_index)\n\n @lib.with_doc(trans_rdm1.__doc__)\n def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n cibra = _as_SCIvector_if_not(cibra, self._strs)\n ciket = _as_SCIvector_if_not(ciket, self._strs)\n return trans_rdm1(cibra, ciket, norb, nelec, link_index)\n\n def gen_linkstr(self, norb, nelec, tril=True, spin=None, ci_strs=None):\n if spin is None:\n spin = self.spin\n if ci_strs is None:\n ci_strs = self._strs\n neleca, nelecb = 
direct_spin1._unpack_nelec(nelec, spin)\n if tril:\n cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, neleca)\n dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, neleca)\n cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelecb)\n dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelecb)\n else:\n cd_indexa = cre_des_linkstr(ci_strs[0], norb, neleca)\n dd_indexa = des_des_linkstr(ci_strs[0], norb, neleca)\n cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelecb)\n dd_indexb = des_des_linkstr(ci_strs[1], norb, nelecb)\n return cd_indexa, dd_indexa, cd_indexb, dd_indexb\n\nSCI = SelectedCI\n\n\ndef _unpack(civec_strs, nelec, ci_strs=None, spin=None):\n neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)\n ci_strs = getattr(civec_strs, '_strs', ci_strs)\n if ci_strs is not None:\n strsa, strsb = ci_strs\n strsa = numpy.asarray(strsa)\n strsb = numpy.asarray(strsb)\n ci_strs = (strsa, strsb)\n return civec_strs, (neleca, nelecb), ci_strs\n\ndef _all_linkstr_index(ci_strs, norb, nelec):\n cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, nelec[0])\n dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, nelec[0])\n cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelec[1])\n dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelec[1])\n return cd_indexa, dd_indexa, cd_indexb, dd_indexb\n\n# numpy.ndarray does not allow to attach attribtues. Overwrite the\n# numpy.ndarray class to tag the ._strs attribute\nclass _SCIvector(numpy.ndarray):\n def __array_finalize__(self, obj):\n self._strs = getattr(obj, '_strs', None)\n\n # Whenever the contents of the array was modified (through ufunc), the tag\n # should be expired. Overwrite the output of ufunc to restore ndarray type.\n def __array_wrap__(self, out, context=None):\n return numpy.ndarray.__array_wrap__(self, out, context).view(numpy.ndarray)\n\ndef _as_SCIvector(civec, ci_strs):\n civec = civec.view(_SCIvector)\n civec._strs = ci_strs\n return civec\n\ndef _as_SCIvector_if_not(civec, ci_strs):\n if getattr(civec, '_strs', None) is None:\n civec = _as_SCIvector(civec, ci_strs)\n return civec\n\nif __name__ == '__main__':\n from functools import reduce\n from pyscf import gto\n from pyscf import scf\n from pyscf import ao2mo\n from pyscf.fci import spin_op\n from pyscf.fci import addons\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None\n mol.atom = [\n ['H', ( 1.,-1. , 0. )],\n ['H', ( 0.,-1. ,-1. )],\n ['H', ( 1.,-0.5 ,-1. )],\n ['H', ( 0.,-0. ,-1. )],\n ['H', ( 1.,-0.5 , 0. )],\n ['H', ( 0., 1. , 1. )],\n ['H', ( 1., 2. , 3. )],\n ['H', ( 1., 2. , 4. 
)],\n ]\n mol.basis = 'sto-3g'\n mol.build()\n\n m = scf.RHF(mol)\n m.kernel()\n norb = m.mo_coeff.shape[1]\n nelec = mol.nelectron\n h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))\n eri = ao2mo.kernel(m._eri, m.mo_coeff, compact=False)\n eri = eri.reshape(norb,norb,norb,norb)\n\n e1, c1 = kernel(h1e, eri, norb, nelec)\n e2, c2 = direct_spin1.kernel(h1e, eri, norb, nelec)\n print(e1, e1 - -11.894559902235565, 'diff to FCI', e1-e2)\n\n print(c1.shape, c2.shape)\n dm1_1 = make_rdm1(c1, norb, nelec)\n dm1_2 = direct_spin1.make_rdm1(c2, norb, nelec)\n print(abs(dm1_1 - dm1_2).sum())\n dm2_1 = make_rdm2(c1, norb, nelec)\n dm2_2 = direct_spin1.make_rdm12(c2, norb, nelec)[1]\n print(abs(dm2_1 - dm2_2).sum())\n\n myci = SelectedCI()\n e, c = kernel_fixed_space(myci, h1e, eri, norb, nelec, c1._strs)\n print(e - -11.894559902235565)\n\n print(myci.large_ci(c1, norb, nelec))\n print(myci.spin_square(c1, norb, nelec)[0] -\n spin_op.spin_square0(to_fci(c1, norb, nelec), norb, nelec)[0])\n\n myci = SelectedCI()\n myci = addons.fix_spin_(myci)\n e1, c1 = myci.kernel(h1e, eri, norb, nelec)\n print(e1, e1 - -11.89467612053687)\n print(myci.spin_square(c1, norb, nelec))\n", "#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Timothy Berkelbach <[email protected]>\n# Qiming Sun <[email protected]>\n#\n\nimport sys\nimport ctypes\nimport numpy\nfrom pyscf import lib\nfrom pyscf.dft import numint\nfrom pyscf.dft.numint import eval_mat, _dot_ao_ao, _dot_ao_dm\nfrom pyscf.dft.numint import _scale_ao, _contract_rho\nfrom pyscf.dft.numint import _rks_gga_wv0, _rks_gga_wv1\nfrom pyscf.dft.numint import _uks_gga_wv0, _uks_gga_wv1\nfrom pyscf.dft.numint import OCCDROP\nfrom pyscf.pbc.dft.gen_grid import libpbc, make_mask, BLKSIZE\nfrom pyscf.pbc.lib.kpts_helper import is_zero, gamma_point, member\n\n#try:\n### Moderate speedup by caching eval_ao\n# from pyscf import pbc\n# from joblib import Memory\n# memory = Memory(cachedir='./tmp/', mmap_mode='r', verbose=0)\n# def memory_cache(f):\n# g = memory.cache(f)\n# def maybe_cache(*args, **kwargs):\n# if pbc.DEBUG:\n# return g(*args, **kwargs)\n# else:\n# return f(*args, **kwargs)\n# return maybe_cache\n#except:\n# memory_cache = lambda f: f\n\ndef eval_ao(cell, coords, kpt=numpy.zeros(3), deriv=0, relativity=0, shls_slice=None,\n non0tab=None, out=None, verbose=None):\n '''Collocate AO crystal orbitals (opt. gradients) on the real-space grid.\n\n Args:\n cell : instance of :class:`Cell`\n\n coords : (nx*ny*nz, 3) ndarray\n The real-space grid point coordinates.\n\n Kwargs:\n kpt : (3,) ndarray\n The k-point corresponding to the crystal AO.\n deriv : int\n AO derivative order. 
It affects the shape of the return array.\n If deriv=0, the returned AO values are stored in a (N,nao) array.\n Otherwise the AO values are stored in an array of shape (M,N,nao).\n Here N is the number of grids, nao is the number of AO functions,\n M is the size associated to the derivative deriv.\n\n Returns:\n aoR : ([4,] nx*ny*nz, nao=cell.nao_nr()) ndarray\n The value of the AO crystal orbitals on the real-space grid by default.\n If deriv=1, also contains the value of the orbitals gradient in the\n x, y, and z directions. It can be either complex or float array,\n depending on the kpt argument. If kpt is not given (gamma point),\n aoR is a float array.\n\n See Also:\n pyscf.dft.numint.eval_ao\n\n '''\n ao_kpts = eval_ao_kpts(cell, coords, numpy.reshape(kpt, (-1,3)), deriv,\n relativity, shls_slice, non0tab, out, verbose)\n return ao_kpts[0]\n\n\n#@memory_cache\ndef eval_ao_kpts(cell, coords, kpts=None, deriv=0, relativity=0,\n shls_slice=None, non0tab=None, out=None, verbose=None, **kwargs):\n '''\n Returns:\n ao_kpts: (nkpts, [comp], ngrids, nao) ndarray\n AO values at each k-point\n '''\n if kpts is None:\n if 'kpt' in kwargs:\n sys.stderr.write('WARN: KNumInt.eval_ao function finds keyword '\n 'argument \"kpt\" and converts it to \"kpts\"\\n')\n kpts = kwargs['kpt']\n else:\n kpts = numpy.zeros((1,3))\n kpts = numpy.reshape(kpts, (-1,3))\n\n comp = (deriv+1)*(deriv+2)*(deriv+3)//6\n if cell.cart:\n feval = 'GTOval_cart_deriv%d' % deriv\n else:\n feval = 'GTOval_sph_deriv%d' % deriv\n return cell.pbc_eval_gto(feval, coords, comp, kpts,\n shls_slice=shls_slice, non0tab=non0tab, out=out)\n\n\ndef eval_rho(cell, ao, dm, non0tab=None, xctype='LDA', hermi=0, verbose=None):\n '''Collocate the *real* density (opt. gradients) on the real-space grid.\n\n Args:\n cell : instance of :class:`Mole` or :class:`Cell`\n\n ao : ([4,] nx*ny*nz, nao=cell.nao_nr()) ndarray\n The value of the AO crystal orbitals on the real-space grid by default.\n If xctype='GGA', also contains the value of the gradient in the x, y,\n and z directions.\n\n Returns:\n rho : ([4,] nx*ny*nz) ndarray\n The value of the density on the real-space grid. 
If xctype='GGA',\n also contains the value of the gradient in the x, y, and z\n directions.\n\n See Also:\n pyscf.dft.numint.eval_rho\n\n '''\n\n if xctype == 'LDA' or xctype == 'HF':\n ngrids, nao = ao.shape\n else:\n ngrids, nao = ao[0].shape\n\n if non0tab is None:\n non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE, cell.nbas),\n dtype=numpy.uint8)\n non0tab[:] = 0xff\n\n # complex orbitals or density matrix\n if numpy.iscomplexobj(ao) or numpy.iscomplexobj(dm):\n shls_slice = (0, cell.nbas)\n ao_loc = cell.ao_loc_nr()\n dm = dm.astype(numpy.complex128)\n# For GGA, function eval_rho returns real(|\\nabla i> D_ij <j| + |i> D_ij <\\nabla j|)\n# = real(|\\nabla i> D_ij <j| + |i> D_ij <\\nabla j|)\n# = real(|\\nabla i> D_ij <j| + conj(|\\nabla j> conj(D_ij) < i|))\n# = real(|\\nabla i> D_ij <j|) + real(|\\nabla j> conj(D_ij) < i|)\n# = real(|\\nabla i> [D_ij + (D^\\dagger)_ij] <j|)\n# symmetrization dm (D + D.conj().T) then /2 because the code below computes\n# 2*real(|\\nabla i> D_ij <j|)\n if not hermi:\n dm = (dm + dm.conj().T) * .5\n\n def dot_bra(bra, aodm):\n #:rho = numpy.einsum('pi,pi->p', bra.real, aodm.real)\n #:rho += numpy.einsum('pi,pi->p', bra.imag, aodm.imag)\n #:return rho\n return _contract_rho(bra, aodm)\n\n if xctype == 'LDA' or xctype == 'HF':\n c0 = _dot_ao_dm(cell, ao, dm, non0tab, shls_slice, ao_loc)\n rho = dot_bra(ao, c0)\n\n elif xctype == 'GGA':\n rho = numpy.empty((4,ngrids))\n c0 = _dot_ao_dm(cell, ao[0], dm, non0tab, shls_slice, ao_loc)\n rho[0] = dot_bra(ao[0], c0)\n for i in range(1, 4):\n rho[i] = dot_bra(ao[i], c0) * 2\n\n else:\n # rho[4] = \\nabla^2 rho, rho[5] = 1/2 |nabla f|^2\n rho = numpy.empty((6,ngrids))\n c0 = _dot_ao_dm(cell, ao[0], dm, non0tab, shls_slice, ao_loc)\n rho[0] = dot_bra(ao[0], c0)\n rho[5] = 0\n for i in range(1, 4):\n rho[i] = dot_bra(ao[i], c0) * 2 # *2 for +c.c.\n c1 = _dot_ao_dm(cell, ao[i], dm, non0tab, shls_slice, ao_loc)\n rho[5] += dot_bra(ao[i], c1)\n XX, YY, ZZ = 4, 7, 9\n ao2 = ao[XX] + ao[YY] + ao[ZZ]\n rho[4] = dot_bra(ao2, c0)\n rho[4] += rho[5]\n rho[4] *= 2 # *2 for +c.c.\n rho[5] *= .5\n else:\n # real orbitals and real DM\n rho = numint.eval_rho(cell, ao, dm, non0tab, xctype, hermi, verbose)\n return rho\n\ndef eval_rho2(cell, ao, mo_coeff, mo_occ, non0tab=None, xctype='LDA',\n verbose=None):\n '''Refer to `pyscf.dft.numint.eval_rho2` for full documentation.\n '''\n xctype = xctype.upper()\n if xctype == 'LDA' or xctype == 'HF':\n ngrids, nao = ao.shape\n else:\n ngrids, nao = ao[0].shape\n\n if non0tab is None:\n non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE,cell.nbas),\n dtype=numpy.uint8)\n non0tab[:] = 0xff\n\n # complex orbitals or density matrix\n if numpy.iscomplexobj(ao) or numpy.iscomplexobj(mo_coeff):\n def dot(bra, ket):\n #:rho = numpy.einsum('pi,pi->p', bra.real, ket.real)\n #:rho += numpy.einsum('pi,pi->p', bra.imag, ket.imag)\n #:return rho\n return _contract_rho(bra, ket)\n\n shls_slice = (0, cell.nbas)\n ao_loc = cell.ao_loc_nr()\n pos = mo_occ > OCCDROP\n cpos = numpy.einsum('ij,j->ij', mo_coeff[:,pos], numpy.sqrt(mo_occ[pos]))\n\n if pos.sum() > 0:\n if xctype == 'LDA' or xctype == 'HF':\n c0 = _dot_ao_dm(cell, ao, cpos, non0tab, shls_slice, ao_loc)\n rho = dot(c0, c0)\n elif xctype == 'GGA':\n rho = numpy.empty((4,ngrids))\n c0 = _dot_ao_dm(cell, ao[0], cpos, non0tab, shls_slice, ao_loc)\n rho[0] = dot(c0, c0)\n for i in range(1, 4):\n c1 = _dot_ao_dm(cell, ao[i], cpos, non0tab, shls_slice, ao_loc)\n rho[i] = dot(c0, c1) * 2 # *2 for +c.c.\n else: # meta-GGA\n # rho[4] = \\nabla^2 rho, 
rho[5] = 1/2 |nabla f|^2\n rho = numpy.empty((6,ngrids))\n c0 = _dot_ao_dm(cell, ao[0], cpos, non0tab, shls_slice, ao_loc)\n rho[0] = dot(c0, c0)\n rho[5] = 0\n for i in range(1, 4):\n c1 = _dot_ao_dm(cell, ao[i], cpos, non0tab, shls_slice, ao_loc)\n rho[i] = dot(c0, c1) * 2 # *2 for +c.c.\n rho[5]+= dot(c1, c1)\n XX, YY, ZZ = 4, 7, 9\n ao2 = ao[XX] + ao[YY] + ao[ZZ]\n c1 = _dot_ao_dm(cell, ao2, cpos, non0tab, shls_slice, ao_loc)\n rho[4] = dot(c0, c1)\n rho[4]+= rho[5]\n rho[4]*= 2\n rho[5]*= .5\n else:\n if xctype == 'LDA' or xctype == 'HF':\n rho = numpy.zeros(ngrids)\n elif xctype == 'GGA':\n rho = numpy.zeros((4,ngrids))\n else:\n rho = numpy.zeros((6,ngrids))\n\n neg = mo_occ < -OCCDROP\n if neg.sum() > 0:\n cneg = numpy.einsum('ij,j->ij', mo_coeff[:,neg], numpy.sqrt(-mo_occ[neg]))\n if xctype == 'LDA' or xctype == 'HF':\n c0 = _dot_ao_dm(cell, ao, cneg, non0tab, shls_slice, ao_loc)\n rho -= dot(c0, c0)\n elif xctype == 'GGA':\n c0 = _dot_ao_dm(cell, ao[0], cneg, non0tab, shls_slice, ao_loc)\n rho[0] -= dot(c0, c0)\n for i in range(1, 4):\n c1 = _dot_ao_dm(cell, ao[i], cneg, non0tab, shls_slice, ao_loc)\n rho[i] -= dot(c0, c1) * 2 # *2 for +c.c.\n else:\n c0 = _dot_ao_dm(cell, ao[0], cneg, non0tab, shls_slice, ao_loc)\n rho[0] -= dot(c0, c0)\n rho5 = 0\n for i in range(1, 4):\n c1 = _dot_ao_dm(cell, ao[i], cneg, non0tab, shls_slice, ao_loc)\n rho[i] -= dot(c0, c1) * 2 # *2 for +c.c.\n rho5 -= dot(c1, c1)\n XX, YY, ZZ = 4, 7, 9\n ao2 = ao[XX] + ao[YY] + ao[ZZ]\n c1 = _dot_ao_dm(cell, ao2, cneg, non0tab, shls_slice, ao_loc)\n rho[4] -= dot(c0, c1) * 2\n rho[4] -= rho5 * 2\n rho[5] -= rho5 * .5\n else:\n rho = numint.eval_rho2(cell, ao, mo_coeff, mo_occ, non0tab, xctype, verbose)\n return rho\n\n\ndef nr_rks(ni, cell, grids, xc_code, dms, spin=0, relativity=0, hermi=0,\n kpts=None, kpts_band=None, max_memory=2000, verbose=None):\n '''Calculate RKS XC functional and potential matrix for given meshgrids and density matrix\n\n Note: This is a replica of pyscf.dft.numint.nr_rks_vxc with kpts added.\n This implemented uses slow function in numint, which only calls eval_rho, eval_mat.\n Faster function uses eval_rho2 which is not yet implemented.\n\n Args:\n ni : an instance of :class:`NumInt` or :class:`KNumInt`\n\n cell : instance of :class:`Mole` or :class:`Cell`\n\n grids : an instance of :class:`Grids`\n grids.coords and grids.weights are needed for coordinates and weights of meshgrids.\n xc_code : str\n XC functional description.\n See :func:`parse_xc` of pyscf/dft/libxc.py for more details.\n dms : 2D/3D array or a list of 2D/3D arrays\n Density matrices (2D) / density matrices for k-points (3D)\n\n Kwargs:\n spin : int\n spin polarized if spin = 1\n relativity : int\n No effects.\n hermi : int\n No effects\n max_memory : int or float\n The maximum size of cache to use (in MB).\n verbose : int or object of :class:`Logger`\n No effects.\n kpts : (3,) ndarray or (nkpts,3) ndarray\n Single or multiple k-points sampled for the DM. Default is gamma point.\n kpts_band : (3,) ndarray or (*,3) ndarray\n A list of arbitrary \"band\" k-points at which to evaluate the XC matrix.\n\n Returns:\n nelec, excsum, vmat.\n nelec is the number of electrons generated by numerical integration.\n excsum is the XC functional value. 
vmat is the XC potential matrix in\n 2D array of shape (nao,nao) where nao is the number of AO functions.\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n\n xctype = ni._xc_type(xc_code)\n make_rho, nset, nao = ni._gen_rho_evaluator(cell, dms, hermi)\n\n nelec = numpy.zeros(nset)\n excsum = numpy.zeros(nset)\n vmat = [0]*nset\n if xctype == 'LDA':\n ao_deriv = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho = make_rho(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1)[:2]\n vrho = vxc[0]\n den = rho*weight\n nelec[i] += den.sum()\n excsum[i] += (den*exc).sum()\n vmat[i] += ni.eval_mat(cell, ao_k1, weight, rho, vxc,\n mask, xctype, 0, verbose)\n elif xctype == 'GGA':\n ao_deriv = 1\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho = make_rho(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1)[:2]\n den = rho[0]*weight\n nelec[i] += den.sum()\n excsum[i] += (den*exc).sum()\n vmat[i] += ni.eval_mat(cell, ao_k1, weight, rho, vxc,\n mask, xctype, 0, verbose)\n elif xctype == 'MGGA':\n if (any(x in xc_code.upper() for x in ('CC06', 'CS', 'BR89', 'MK00'))):\n raise NotImplementedError('laplacian in meta-GGA method')\n ao_deriv = 2\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho = make_rho(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1)[:2]\n den = rho[0]*weight\n nelec[i] += den.sum()\n excsum[i] += (den*exc).sum()\n vmat[i] += ni.eval_mat(cell, ao_k1, weight, rho, vxc,\n mask, xctype, 0, verbose)\n if nset == 1:\n nelec = nelec[0]\n excsum = excsum[0]\n vmat = vmat[0]\n return nelec, excsum, numpy.asarray(vmat)\n\ndef nr_uks(ni, cell, grids, xc_code, dms, spin=1, relativity=0, hermi=0,\n kpts=None, kpts_band=None, max_memory=2000, verbose=None):\n '''Calculate UKS XC functional and potential matrix for given meshgrids and density matrix\n\n Note: This is a replica of pyscf.dft.numint.nr_rks_vxc with kpts added.\n This implemented uses slow function in numint, which only calls eval_rho, eval_mat.\n Faster function uses eval_rho2 which is not yet implemented.\n\n Args:\n ni : an instance of :class:`NumInt` or :class:`KNumInt`\n\n cell : instance of :class:`Mole` or :class:`Cell`\n\n grids : an instance of :class:`Grids`\n grids.coords and grids.weights are needed for coordinates and weights of meshgrids.\n xc_code : str\n XC functional description.\n See :func:`parse_xc` of pyscf/dft/libxc.py for more details.\n dms :\n Density matrices\n\n Kwargs:\n spin : int\n spin polarized if spin = 1\n relativity : int\n No effects.\n hermi : int\n Input density matrices symmetric or not\n max_memory : int or float\n The maximum size of cache to use (in MB).\n verbose : int or object of :class:`Logger`\n No effects.\n kpts : (3,) ndarray or (nkpts,3) ndarray\n Single or multiple k-points sampled for the DM. Default is gamma point.\n kpts_band : (3,) ndarray or (*,3) ndarray\n A list of arbitrary \"band\" k-points at which to evaluate the XC matrix.\n\n Returns:\n nelec, excsum, vmat.\n nelec is the number of electrons generated by numerical integration.\n excsum is the XC functional value. 
vmat is the XC potential matrix in\n 2D array of shape (nao,nao) where nao is the number of AO functions.\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n\n xctype = ni._xc_type(xc_code)\n dma, dmb = _format_uks_dm(dms)\n nao = dma.shape[-1]\n make_rhoa, nset = ni._gen_rho_evaluator(cell, dma, hermi)[:2]\n make_rhob = ni._gen_rho_evaluator(cell, dmb, hermi)[0]\n\n nelec = numpy.zeros((2,nset))\n excsum = numpy.zeros(nset)\n vmata = [0]*nset\n vmatb = [0]*nset\n if xctype == 'LDA':\n ao_deriv = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho_a = make_rhoa(i, ao_k2, mask, xctype)\n rho_b = make_rhob(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, (rho_a, rho_b),\n 1, relativity, 1, verbose)[:2]\n vrho = vxc[0]\n den = rho_a * weight\n nelec[0,i] += den.sum()\n excsum[i] += (den*exc).sum()\n den = rho_b * weight\n nelec[1,i] += den.sum()\n excsum[i] += (den*exc).sum()\n\n vmata[i] += ni.eval_mat(cell, ao_k1, weight, rho_a, vrho[:,0],\n mask, xctype, 1, verbose)\n vmatb[i] += ni.eval_mat(cell, ao_k1, weight, rho_b, vrho[:,1],\n mask, xctype, 1, verbose)\n elif xctype == 'GGA':\n ao_deriv = 1\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts,\n kpts_band, max_memory):\n for i in range(nset):\n rho_a = make_rhoa(i, ao_k2, mask, xctype)\n rho_b = make_rhob(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, (rho_a, rho_b),\n 1, relativity, 1, verbose)[:2]\n vrho, vsigma = vxc[:2]\n den = rho_a[0]*weight\n nelec[0,i] += den.sum()\n excsum[i] += (den*exc).sum()\n den = rho_b[0]*weight\n nelec[1,i] += den.sum()\n excsum[i] += (den*exc).sum()\n\n vmata[i] += ni.eval_mat(cell, ao_k1, weight, (rho_a,rho_b),\n (vrho[:,0], (vsigma[:,0],vsigma[:,1])),\n mask, xctype, 1, verbose)\n vmatb[i] += ni.eval_mat(cell, ao_k1, weight, (rho_b,rho_a),\n (vrho[:,1], (vsigma[:,2],vsigma[:,1])),\n mask, xctype, 1, verbose)\n elif xctype == 'MGGA':\n assert(all(x not in xc_code.upper() for x in ('CC06', 'CS', 'BR89', 'MK00')))\n ao_deriv = 2\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho_a = make_rhoa(i, ao_k2, mask, xctype)\n rho_b = make_rhob(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, (rho_a, rho_b),\n 1, relativity, 1, verbose)[:2]\n vrho, vsigma, vlapl, vtau = vxc\n den = rho_a[0]*weight\n nelec[0,i] += den.sum()\n excsum[i] += (den*exc).sum()\n den = rho_b[0]*weight\n nelec[1,i] += den.sum()\n excsum[i] += (den*exc).sum()\n\n v = (vrho[:,0], (vsigma[:,0],vsigma[:,1]), None, vtau[:,0])\n vmata[i] += ni.eval_mat(cell, ao_k1, weight, (rho_a,rho_b), v,\n mask, xctype, 1, verbose)\n v = (vrho[:,1], (vsigma[:,2],vsigma[:,1]), None, vtau[:,1])\n vmatb[i] += ni.eval_mat(cell, ao_k1, weight, (rho_b,rho_a), v,\n mask, xctype, 1, verbose)\n v = None\n\n if dma.ndim == vmata[0].ndim: # One set of DMs in the input\n nelec = nelec[:,0]\n excsum = excsum[0]\n vmata = vmata[0]\n vmatb = vmatb[0]\n return nelec, excsum, numpy.asarray((vmata,vmatb))\n\ndef _format_uks_dm(dms):\n dma, dmb = dms\n if getattr(dms, 'mo_coeff', None) is not None:\n#TODO: test whether dm.mo_coeff matching dm\n mo_coeff = dms.mo_coeff\n mo_occ = dms.mo_occ\n if (isinstance(mo_coeff[0], numpy.ndarray) and\n mo_coeff[0].ndim < dma.ndim): # handle ROKS\n mo_occa = [numpy.array(occ> 0, dtype=numpy.double) for occ in mo_occ]\n mo_occb = [numpy.array(occ==2, 
dtype=numpy.double) for occ in mo_occ]\n dma = lib.tag_array(dma, mo_coeff=mo_coeff, mo_occ=mo_occa)\n dmb = lib.tag_array(dmb, mo_coeff=mo_coeff, mo_occ=mo_occb)\n else:\n dma = lib.tag_array(dma, mo_coeff=mo_coeff[0], mo_occ=mo_occ[0])\n dmb = lib.tag_array(dmb, mo_coeff=mo_coeff[1], mo_occ=mo_occ[1])\n return dma, dmb\n\nnr_rks_vxc = nr_rks\nnr_uks_vxc = nr_uks\n\ndef nr_rks_fxc(ni, cell, grids, xc_code, dm0, dms, relativity=0, hermi=0,\n rho0=None, vxc=None, fxc=None, kpts=None, max_memory=2000,\n verbose=None):\n '''Contract RKS XC kernel matrix with given density matrices\n\n Args:\n ni : an instance of :class:`NumInt` or :class:`KNumInt`\n\n cell : instance of :class:`Mole` or :class:`Cell`\n\n grids : an instance of :class:`Grids`\n grids.coords and grids.weights are needed for coordinates and weights of meshgrids.\n xc_code : str\n XC functional description.\n See :func:`parse_xc` of pyscf/dft/libxc.py for more details.\n dms : 2D/3D array or a list of 2D/3D arrays\n Density matrices (2D) / density matrices for k-points (3D)\n\n Kwargs:\n hermi : int\n Input density matrices symmetric or not\n max_memory : int or float\n The maximum size of cache to use (in MB).\n rho0 : float array\n Zero-order density (and density derivative for GGA). Giving kwargs rho0,\n vxc and fxc to improve better performance.\n vxc : float array\n First order XC derivatives\n fxc : float array\n Second order XC derivatives\n\n Examples:\n\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n xctype = ni._xc_type(xc_code)\n\n make_rho, nset, nao = ni._gen_rho_evaluator(cell, dms, hermi)\n if ((xctype == 'LDA' and fxc is None) or\n (xctype == 'GGA' and rho0 is None)):\n make_rho0 = ni._gen_rho_evaluator(cell, dm0, 1)[0]\n\n ao_loc = cell.ao_loc_nr()\n vmat = [0] * nset\n if xctype == 'LDA':\n ao_deriv = 0\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if fxc is None:\n rho = make_rho0(0, ao_k1, mask, xctype)\n fxc0 = ni.eval_xc(xc_code, rho, 0, relativity, 2, verbose)[2]\n frr = fxc0[0]\n else:\n frr = fxc[0][ip:ip+ngrid]\n ip += ngrid\n\n for i in range(nset):\n rho1 = make_rho(i, ao_k1, mask, xctype)\n wv = weight * frr * rho1\n vmat[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n elif xctype == 'GGA':\n ao_deriv = 1\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if rho0 is None:\n rho = make_rho0(0, ao_k1, mask, xctype)\n else:\n rho = numpy.asarray(rho0[:,ip:ip+ngrid], order='C')\n\n if vxc is None or fxc is None:\n vxc0, fxc0 = ni.eval_xc(xc_code, rho, 0, relativity, 2, verbose)[1:3]\n else:\n vxc0 = (None, vxc[1][ip:ip+ngrid])\n fxc0 = (fxc[0][ip:ip+ngrid], fxc[1][ip:ip+ngrid], fxc[2][ip:ip+ngrid])\n ip += ngrid\n\n for i in range(nset):\n rho1 = make_rho(i, ao_k1, mask, xctype)\n wv = _rks_gga_wv1(rho, rho1, vxc0, fxc0, weight)\n vmat[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n # call swapaxes method to swap last two indices because vmat may be a 3D\n # array (nset,nao,nao) in single k-point mode or a 4D array\n # (nset,nkpts,nao,nao) in k-points mode\n for i in range(nset): # for (\\nabla\\mu) \\nu + \\mu (\\nabla\\nu)\n vmat[i] = vmat[i] + vmat[i].swapaxes(-2,-1).conj()\n\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == vmat[0].ndim:\n # One set of DMs in the input\n vmat = vmat[0]\n return 
numpy.asarray(vmat)\n\ndef nr_rks_fxc_st(ni, cell, grids, xc_code, dm0, dms_alpha, relativity=0, singlet=True,\n rho0=None, vxc=None, fxc=None, kpts=None, max_memory=2000,\n verbose=None):\n '''Associated to singlet or triplet Hessian\n Note the difference to nr_rks_fxc, dms_alpha is the response density\n matrices of alpha spin, alpha+/-beta DM is applied due to singlet/triplet\n coupling\n\n Ref. CPL, 256, 454\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n xctype = ni._xc_type(xc_code)\n\n make_rho, nset, nao = ni._gen_rho_evaluator(cell, dms_alpha)\n if ((xctype == 'LDA' and fxc is None) or\n (xctype == 'GGA' and rho0 is None)):\n make_rho0 = ni._gen_rho_evaluator(cell, dm0, 1)[0]\n\n ao_loc = cell.ao_loc_nr()\n vmat = [0] * nset\n if xctype == 'LDA':\n ao_deriv = 0\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if fxc is None:\n rho = make_rho0(0, ao_k1, mask, xctype)\n rho *= .5 # alpha density\n fxc0 = ni.eval_xc(xc_code, (rho,rho), 1, deriv=2)[2]\n u_u, u_d, d_d = fxc0[0].T\n else:\n u_u, u_d, d_d = fxc[0][ip:ip+ngrid].T\n ip += ngrid\n if singlet:\n frho = u_u + u_d\n else:\n frho = u_u - u_d\n\n for i in range(nset):\n rho1 = make_rho(i, ao_k1, mask, xctype)\n wv = weight * frho * rho1\n vmat[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n elif xctype == 'GGA':\n ao_deriv = 1\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if vxc is None or fxc is None:\n rho = make_rho0(0, ao_k1, mask, xctype)\n rho *= .5 # alpha density\n vxc0, fxc0 = ni.eval_xc(xc_code, (rho,rho), 1, deriv=2)[1:3]\n\n vsigma = vxc0[1].T\n u_u, u_d, d_d = fxc0[0].T # v2rho2\n u_uu, u_ud, u_dd, d_uu, d_ud, d_dd = fxc0[1].T # v2rhosigma\n uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd = fxc0[2].T # v2sigma2\n else:\n rho = rho0[0][:,ip:ip+ngrid]\n vsigma = vxc[1][ip:ip+ngrid].T\n u_u, u_d, d_d = fxc[0][ip:ip+ngrid].T # v2rho2\n u_uu, u_ud, u_dd, d_uu, d_ud, d_dd = fxc[1][ip:ip+ngrid].T # v2rhosigma\n uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd = fxc[2][ip:ip+ngrid].T # v2sigma2\n\n if singlet:\n fgamma = vsigma[0] + vsigma[1] * .5\n frho = u_u + u_d\n fgg = uu_uu + .5*ud_ud + 2*uu_ud + uu_dd\n frhogamma = u_uu + u_dd + u_ud\n else:\n fgamma = vsigma[0] - vsigma[1] * .5\n frho = u_u - u_d\n fgg = uu_uu - uu_dd\n frhogamma = u_uu - u_dd\n\n for i in range(nset):\n # rho1[0 ] = |b><j| z_{bj}\n # rho1[1:] = \\nabla(|b><j|) z_{bj}\n rho1 = make_rho(i, ao_k1, mask, xctype)\n wv = _rks_gga_wv1(rho, rho1, (None,fgamma),\n (frho,frhogamma,fgg), weight)\n vmat[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n for i in range(nset): # for (\\nabla\\mu) \\nu + \\mu (\\nabla\\nu)\n vmat[i] = vmat[i] + vmat[i].swapaxes(-2,-1).conj()\n\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n if isinstance(dms_alpha, numpy.ndarray) and dms_alpha.ndim == vmat[0].ndim:\n vmat = vmat[0]\n return numpy.asarray(vmat)\n\n\ndef nr_uks_fxc(ni, cell, grids, xc_code, dm0, dms, relativity=0, hermi=0,\n rho0=None, vxc=None, fxc=None, kpts=None, max_memory=2000,\n verbose=None):\n '''Contract UKS XC kernel matrix with given density matrices\n\n Args:\n ni : an instance of :class:`NumInt` or :class:`KNumInt`\n\n cell : instance of :class:`Mole` or :class:`Cell`\n\n grids : an instance of :class:`Grids`\n grids.coords and grids.weights are needed for coordinates and weights of meshgrids.\n xc_code : str\n XC 
functional description.\n See :func:`parse_xc` of pyscf/dft/libxc.py for more details.\n dms : 2D array a list of 2D arrays\n Density matrix or multiple density matrices\n\n Kwargs:\n hermi : int\n Input density matrices symmetric or not\n max_memory : int or float\n The maximum size of cache to use (in MB).\n rho0 : float array\n Zero-order density (and density derivative for GGA). Giving kwargs rho0,\n vxc and fxc to improve better performance.\n vxc : float array\n First order XC derivatives\n fxc : float array\n Second order XC derivatives\n\n Returns:\n nelec, excsum, vmat.\n nelec is the number of electrons generated by numerical integration.\n excsum is the XC functional value. vmat is the XC potential matrix in\n 2D array of shape (nao,nao) where nao is the number of AO functions.\n\n Examples:\n\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n xctype = ni._xc_type(xc_code)\n\n dma, dmb = _format_uks_dm(dms)\n nao = dma.shape[-1]\n make_rhoa, nset = ni._gen_rho_evaluator(cell, dma, hermi)[:2]\n make_rhob = ni._gen_rho_evaluator(cell, dmb, hermi)[0]\n\n if ((xctype == 'LDA' and fxc is None) or\n (xctype == 'GGA' and rho0 is None)):\n dm0a, dm0b = _format_uks_dm(dm0)\n make_rho0a = ni._gen_rho_evaluator(cell, dm0a, 1)[0]\n make_rho0b = ni._gen_rho_evaluator(cell, dm0b, 1)[0]\n\n shls_slice = (0, cell.nbas)\n ao_loc = cell.ao_loc_nr()\n\n vmata = [0] * nset\n vmatb = [0] * nset\n if xctype == 'LDA':\n ao_deriv = 0\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if fxc is None:\n rho0a = make_rho0a(0, ao_k1, mask, xctype)\n rho0b = make_rho0b(0, ao_k1, mask, xctype)\n fxc0 = ni.eval_xc(xc_code, (rho0a,rho0b), 1, relativity, 2, verbose)[2]\n u_u, u_d, d_d = fxc0[0].T\n else:\n u_u, u_d, d_d = fxc[0][ip:ip+ngrid].T\n ip += ngrid\n\n for i in range(nset):\n rho1a = make_rhoa(i, ao_k1, mask, xctype)\n rho1b = make_rhob(i, ao_k1, mask, xctype)\n wv = u_u * rho1a + u_d * rho1b\n wv *= weight\n vmata[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n wv = u_d * rho1a + d_d * rho1b\n wv *= weight\n vmatb[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n elif xctype == 'GGA':\n ao_deriv = 1\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if rho0 is None:\n rho0a = make_rho0a(0, ao_k1, mask, xctype)\n rho0b = make_rho0b(0, ao_k1, mask, xctype)\n else:\n rho0a = rho0[0][:,ip:ip+ngrid]\n rho0b = rho0[1][:,ip:ip+ngrid]\n if vxc is None or fxc is None:\n vxc0, fxc0 = ni.eval_xc(xc_code, (rho0a,rho0b), 1, relativity, 2, verbose)[1:3]\n else:\n vxc0 = (None, vxc[1][ip:ip+ngrid])\n fxc0 = (fxc[0][ip:ip+ngrid], fxc[1][ip:ip+ngrid], fxc[2][ip:ip+ngrid])\n ip += ngrid\n\n for i in range(nset):\n rho1a = make_rhoa(i, ao_k1, mask, xctype)\n rho1b = make_rhob(i, ao_k1, mask, xctype)\n wva, wvb = _uks_gga_wv1((rho0a,rho0b), (rho1a,rho1b),\n vxc0, fxc0, weight)\n vmata[i] += ni._fxc_mat(cell, ao_k1, wva, mask, xctype, ao_loc)\n vmatb[i] += ni._fxc_mat(cell, ao_k1, wvb, mask, xctype, ao_loc)\n\n for i in range(nset): # for (\\nabla\\mu) \\nu + \\mu (\\nabla\\nu)\n vmata[i] = vmata[i] + vmata[i].swapaxes(-1,-2).conj()\n vmatb[i] = vmatb[i] + vmatb[i].swapaxes(-1,-2).conj()\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n if dma.ndim == vmata[0].ndim: # One set of DMs in the input\n vmata = vmata[0]\n vmatb = vmatb[0]\n return numpy.asarray((vmata,vmatb))\n\ndef 
_fxc_mat(cell, ao, wv, non0tab, xctype, ao_loc):\n shls_slice = (0, cell.nbas)\n\n if xctype == 'LDA' or xctype == 'HF':\n #:aow = numpy.einsum('pi,p->pi', ao, wv)\n aow = _scale_ao(ao, wv)\n mat = _dot_ao_ao(cell, ao, aow, non0tab, shls_slice, ao_loc)\n else:\n #:aow = numpy.einsum('npi,np->pi', ao, wv)\n aow = _scale_ao(ao, wv)\n mat = _dot_ao_ao(cell, ao[0], aow, non0tab, shls_slice, ao_loc)\n return mat\n\ndef cache_xc_kernel(ni, cell, grids, xc_code, mo_coeff, mo_occ, spin=0,\n kpts=None, max_memory=2000):\n '''Compute the 0th order density, Vxc and fxc. They can be used in TDDFT,\n DFT hessian module etc.\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n xctype = ni._xc_type(xc_code)\n ao_deriv = 0\n if xctype == 'GGA':\n ao_deriv = 1\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n nao = cell.nao_nr()\n if spin == 0:\n rho = []\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n rho.append(ni.eval_rho2(cell, ao_k1, mo_coeff, mo_occ, mask, xctype))\n rho = numpy.hstack(rho)\n else:\n rhoa = []\n rhob = []\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n rhoa.append(ni.eval_rho2(cell, ao_k1, mo_coeff[0], mo_occ[0], mask, xctype))\n rhob.append(ni.eval_rho2(cell, ao_k1, mo_coeff[1], mo_occ[1], mask, xctype))\n rho = (numpy.hstack(rhoa), numpy.hstack(rhob))\n vxc, fxc = ni.eval_xc(xc_code, rho, spin, 0, 2, 0)[1:3]\n return rho, vxc, fxc\n\n\ndef get_rho(ni, cell, dm, grids, kpts=numpy.zeros((1,3)), max_memory=2000):\n '''Density in real space\n '''\n make_rho, nset, nao = ni._gen_rho_evaluator(cell, dm)\n assert(nset == 1)\n rho = numpy.empty(grids.weights.size)\n p1 = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, 0, kpts, None, max_memory):\n p0, p1 = p1, p1 + weight.size\n rho[p0:p1] = make_rho(0, ao_k1, mask, 'LDA')\n return rho\n\n\nclass NumInt(numint.NumInt):\n '''Generalization of pyscf's NumInt class for a single k-point shift and\n periodic images.\n '''\n def eval_ao(self, cell, coords, kpt=numpy.zeros(3), deriv=0, relativity=0,\n shls_slice=None, non0tab=None, out=None, verbose=None):\n return eval_ao(cell, coords, kpt, deriv, relativity, shls_slice,\n non0tab, out, verbose)\n\n @lib.with_doc(make_mask.__doc__)\n def make_mask(self, cell, coords, relativity=0, shls_slice=None,\n verbose=None):\n return make_mask(cell, coords, relativity, shls_slice, verbose)\n\n @lib.with_doc(eval_rho.__doc__)\n def eval_rho(self, cell, ao, dm, non0tab=None, xctype='LDA', hermi=0, verbose=None):\n return eval_rho(cell, ao, dm, non0tab, xctype, hermi, verbose)\n\n def eval_rho2(self, cell, ao, mo_coeff, mo_occ, non0tab=None, xctype='LDA',\n verbose=None):\n return eval_rho2(cell, ao, mo_coeff, mo_occ, non0tab, xctype, verbose)\n\n def nr_vxc(self, cell, grids, xc_code, dms, spin=0, relativity=0, hermi=0,\n kpt=None, kpts_band=None, max_memory=2000, verbose=None):\n '''Evaluate RKS/UKS XC functional and potential matrix.\n See :func:`nr_rks` and :func:`nr_uks` for more details.\n '''\n if spin == 0:\n return self.nr_rks(cell, grids, xc_code, dms, hermi,\n kpt, kpts_band, max_memory, verbose)\n else:\n return self.nr_uks(cell, grids, xc_code, dms, hermi,\n kpt, kpts_band, max_memory, verbose)\n\n @lib.with_doc(nr_rks.__doc__)\n def nr_rks(self, cell, grids, xc_code, dms, hermi=0,\n kpt=numpy.zeros(3), kpts_band=None, max_memory=2000, verbose=None):\n if kpts_band is not None:\n# To compute Vxc on 
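# --- Illustrative usage sketch (assumed gamma-point setup; the cell construction
# follows the standard pyscf.pbc API and is only sketched, not taken from this file) ---
#
#   from pyscf.pbc import gto, dft
#   cell = gto.Cell()
#   cell.atom = 'He 0 0 0'
#   cell.a = numpy.eye(3) * 4.0
#   cell.basis = 'gth-szv'
#   cell.pseudo = 'gth-pade'
#   cell.build()
#   mf = dft.RKS(cell).run()
#   ni = NumInt()                                    # the single-k-point class in this file
#   rho = ni.get_rho(cell, mf.make_rdm1(), mf.grids)
#   # one real density value per grid point: rho.size == mf.grids.weights.size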
kpts_band, convert the NumInt object to KNumInt object.\n ni = KNumInt()\n ni.__dict__.update(self.__dict__)\n nao = dms.shape[-1]\n return ni.nr_rks(cell, grids, xc_code, dms.reshape(-1,1,nao,nao),\n hermi, kpt.reshape(1,3), kpts_band, max_memory,\n verbose)\n return nr_rks(self, cell, grids, xc_code, dms,\n 0, 0, hermi, kpt, kpts_band, max_memory, verbose)\n\n @lib.with_doc(nr_uks.__doc__)\n def nr_uks(self, cell, grids, xc_code, dms, hermi=0,\n kpt=numpy.zeros(3), kpts_band=None, max_memory=2000, verbose=None):\n if kpts_band is not None:\n# To compute Vxc on kpts_band, convert the NumInt object to KNumInt object.\n ni = KNumInt()\n ni.__dict__.update(self.__dict__)\n nao = dms[0].shape[-1]\n return ni.nr_uks(cell, grids, xc_code, dms.reshape(-1,1,nao,nao),\n hermi, kpt.reshape(1,3), kpts_band, max_memory,\n verbose)\n return nr_uks(self, cell, grids, xc_code, dms,\n 1, 0, hermi, kpt, kpts_band, max_memory, verbose)\n\n def eval_mat(self, cell, ao, weight, rho, vxc,\n non0tab=None, xctype='LDA', spin=0, verbose=None):\n# Guess whether ao is evaluated for kpts_band. When xctype is LDA, ao on grids\n# should be a 2D array. For other xc functional, ao should be a 3D array.\n if ao.ndim == 2 or (xctype != 'LDA' and ao.ndim == 3):\n mat = eval_mat(cell, ao, weight, rho, vxc, non0tab, xctype, spin, verbose)\n else:\n nkpts = len(ao)\n nao = ao[0].shape[-1]\n mat = numpy.empty((nkpts,nao,nao), dtype=numpy.complex128)\n for k in range(nkpts):\n mat[k] = eval_mat(cell, ao[k], weight, rho, vxc,\n non0tab, xctype, spin, verbose)\n return mat\n\n def _fxc_mat(self, cell, ao, wv, non0tab, xctype, ao_loc):\n return _fxc_mat(cell, ao, wv, non0tab, xctype, ao_loc)\n\n def block_loop(self, cell, grids, nao, deriv=0, kpt=numpy.zeros(3),\n kpts_band=None, max_memory=2000, non0tab=None, blksize=None):\n '''Define this macro to loop over grids by blocks.\n '''\n# For UniformGrids, grids.coords does not indicate whehter grids are initialized\n if grids.non0tab is None:\n grids.build(with_non0tab=True)\n grids_coords = grids.coords\n grids_weights = grids.weights\n ngrids = grids_coords.shape[0]\n comp = (deriv+1)*(deriv+2)*(deriv+3)//6\n# NOTE to index grids.non0tab, the blksize needs to be the integer multiplier of BLKSIZE\n if blksize is None:\n blksize = int(max_memory*1e6/(comp*2*nao*16*BLKSIZE))*BLKSIZE\n blksize = max(BLKSIZE, min(blksize, ngrids, BLKSIZE*1200))\n if non0tab is None:\n non0tab = grids.non0tab\n if non0tab is None:\n non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE,cell.nbas),\n dtype=numpy.uint8)\n non0tab[:] = 0xff\n kpt = numpy.reshape(kpt, 3)\n if kpts_band is None:\n kpt1 = kpt2 = kpt\n else:\n kpt1 = kpts_band\n kpt2 = kpt\n\n for ip0 in range(0, ngrids, blksize):\n ip1 = min(ngrids, ip0+blksize)\n coords = grids_coords[ip0:ip1]\n weight = grids_weights[ip0:ip1]\n non0 = non0tab[ip0//BLKSIZE:]\n ao_k2 = self.eval_ao(cell, coords, kpt2, deriv=deriv, non0tab=non0)\n if abs(kpt1-kpt2).sum() < 1e-9:\n ao_k1 = ao_k2\n else:\n ao_k1 = self.eval_ao(cell, coords, kpt1, deriv=deriv)\n yield ao_k1, ao_k2, non0, weight, coords\n ao_k1 = ao_k2 = None\n\n def _gen_rho_evaluator(self, cell, dms, hermi=0):\n return numint.NumInt._gen_rho_evaluator(self, cell, dms, hermi)\n\n nr_rks_fxc = nr_rks_fxc\n nr_uks_fxc = nr_uks_fxc\n cache_xc_kernel = cache_xc_kernel\n get_rho = get_rho\n\n def rsh_and_hybrid_coeff(self, xc_code, spin=0):\n omega, alpha, hyb = numint.NumInt.rsh_and_hybrid_coeff(self, xc_code, spin)\n if abs(omega) > 1e-10:\n raise NotImplementedError\n return omega, alpha, 
hyb\n_NumInt = NumInt\n\n\nclass KNumInt(numint.NumInt):\n '''Generalization of pyscf's NumInt class for k-point sampling and\n periodic images.\n '''\n def __init__(self, kpts=numpy.zeros((1,3))):\n numint.NumInt.__init__(self)\n self.kpts = numpy.reshape(kpts, (-1,3))\n\n def eval_ao(self, cell, coords, kpts=numpy.zeros((1,3)), deriv=0, relativity=0,\n shls_slice=None, non0tab=None, out=None, verbose=None, **kwargs):\n return eval_ao_kpts(cell, coords, kpts, deriv,\n relativity, shls_slice, non0tab, out, verbose)\n\n @lib.with_doc(make_mask.__doc__)\n def make_mask(self, cell, coords, relativity=0, shls_slice=None,\n verbose=None):\n return make_mask(cell, coords, relativity, shls_slice, verbose)\n\n def eval_rho(self, cell, ao_kpts, dm_kpts, non0tab=None, xctype='LDA',\n hermi=0, verbose=None):\n '''Collocate the *real* density (opt. gradients) on the real-space grid.\n\n Args:\n cell : Mole or Cell object\n ao_kpts : (nkpts, ngrids, nao) ndarray\n AO values at each k-point\n dm_kpts: (nkpts, nao, nao) ndarray\n Density matrix at each k-point\n\n Returns:\n rhoR : (ngrids,) ndarray\n '''\n nkpts = len(ao_kpts)\n rhoR = 0\n for k in range(nkpts):\n rhoR += eval_rho(cell, ao_kpts[k], dm_kpts[k], non0tab, xctype,\n hermi, verbose)\n rhoR *= 1./nkpts\n return rhoR\n\n def eval_rho2(self, cell, ao_kpts, mo_coeff_kpts, mo_occ_kpts,\n non0tab=None, xctype='LDA', verbose=None):\n nkpts = len(ao_kpts)\n rhoR = 0\n for k in range(nkpts):\n rhoR += eval_rho2(cell, ao_kpts[k], mo_coeff_kpts[k],\n mo_occ_kpts[k], non0tab, xctype, verbose)\n rhoR *= 1./nkpts\n return rhoR\n\n def nr_vxc(self, cell, grids, xc_code, dms, spin=0, relativity=0, hermi=0,\n kpts=None, kpts_band=None, max_memory=2000, verbose=None):\n '''Evaluate RKS/UKS XC functional and potential matrix.\n See :func:`nr_rks` and :func:`nr_uks` for more details.\n '''\n if spin == 0:\n return self.nr_rks(cell, grids, xc_code, dms, hermi,\n kpts, kpts_band, max_memory, verbose)\n else:\n return self.nr_uks(cell, grids, xc_code, dms, hermi,\n kpts, kpts_band, max_memory, verbose)\n\n @lib.with_doc(nr_rks.__doc__)\n def nr_rks(self, cell, grids, xc_code, dms, hermi=0, kpts=None, kpts_band=None,\n max_memory=2000, verbose=None, **kwargs):\n if kpts is None:\n if 'kpt' in kwargs:\n sys.stderr.write('WARN: KNumInt.nr_rks function finds keyword '\n 'argument \"kpt\" and converts it to \"kpts\"\\n')\n kpts = kwargs['kpt']\n else:\n kpts = self.kpts\n kpts = kpts.reshape(-1,3)\n\n return nr_rks(self, cell, grids, xc_code, dms, 0, 0,\n hermi, kpts, kpts_band, max_memory, verbose)\n\n @lib.with_doc(nr_uks.__doc__)\n def nr_uks(self, cell, grids, xc_code, dms, hermi=0, kpts=None, kpts_band=None,\n max_memory=2000, verbose=None, **kwargs):\n if kpts is None:\n if 'kpt' in kwargs:\n sys.stderr.write('WARN: KNumInt.nr_uks function finds keyword '\n 'argument \"kpt\" and converts it to \"kpts\"\\n')\n kpts = kwargs['kpt']\n else:\n kpts = self.kpts\n kpts = kpts.reshape(-1,3)\n\n return nr_uks(self, cell, grids, xc_code, dms, 1, 0,\n hermi, kpts, kpts_band, max_memory, verbose)\n\n def eval_mat(self, cell, ao_kpts, weight, rho, vxc,\n non0tab=None, xctype='LDA', spin=0, verbose=None):\n nkpts = len(ao_kpts)\n nao = ao_kpts[0].shape[-1]\n dtype = numpy.result_type(*ao_kpts)\n mat = numpy.empty((nkpts,nao,nao), dtype=dtype)\n for k in range(nkpts):\n mat[k] = eval_mat(cell, ao_kpts[k], weight, rho, vxc,\n non0tab, xctype, spin, verbose)\n return mat\n\n def _fxc_mat(self, cell, ao_kpts, wv, non0tab, xctype, ao_loc):\n nkpts = len(ao_kpts)\n nao = 
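# --- Illustrative sketch (assumed sizes; not PySCF code) ---
# KNumInt.eval_rho above averages the collocated density over the sampled
# k-points, rho(r) = (1/Nk) * sum_k sum_{mu,nu} ao_k(r,mu)* D_k(mu,nu) ao_k(r,nu)
# (up to conjugation conventions).  A minimal NumPy analogue with complex AOs:
import numpy as np

nkpts, ngrid, nao = 4, 32, 5
ao_kpts = np.random.rand(nkpts, ngrid, nao) + 1j * np.random.rand(nkpts, ngrid, nao)
dm_kpts = np.random.rand(nkpts, nao, nao) + 1j * np.random.rand(nkpts, nao, nao)
dm_kpts = (dm_kpts + dm_kpts.conj().transpose(0, 2, 1)) * .5   # hermitian DMs
rho = np.einsum('kgi,kij,kgj->g', ao_kpts.conj(), dm_kpts, ao_kpts).real / nkpts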
ao_kpts[0].shape[-1]\n dtype = numpy.result_type(*ao_kpts)\n mat = numpy.empty((nkpts,nao,nao), dtype=dtype)\n for k in range(nkpts):\n mat[k] = _fxc_mat(cell, ao_kpts[k], wv, non0tab, xctype, ao_loc)\n return mat\n\n def block_loop(self, cell, grids, nao, deriv=0, kpts=numpy.zeros((1,3)),\n kpts_band=None, max_memory=2000, non0tab=None, blksize=None):\n '''Define this macro to loop over grids by blocks.\n '''\n if grids.coords is None:\n grids.build(with_non0tab=True)\n grids_coords = grids.coords\n grids_weights = grids.weights\n ngrids = grids_coords.shape[0]\n nkpts = len(kpts)\n comp = (deriv+1)*(deriv+2)*(deriv+3)//6\n# NOTE to index grids.non0tab, the blksize needs to be the integer multiplier of BLKSIZE\n if blksize is None:\n blksize = int(max_memory*1e6/(comp*2*nkpts*nao*16*BLKSIZE))*BLKSIZE\n blksize = max(BLKSIZE, min(blksize, ngrids, BLKSIZE*1200))\n if non0tab is None:\n non0tab = grids.non0tab\n if non0tab is None:\n non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE,cell.nbas),\n dtype=numpy.uint8)\n non0tab[:] = 0xff\n if kpts_band is not None:\n kpts_band = numpy.reshape(kpts_band, (-1,3))\n where = [member(k, kpts) for k in kpts_band]\n where = [k_id[0] if len(k_id)>0 else None for k_id in where]\n\n for ip0 in range(0, ngrids, blksize):\n ip1 = min(ngrids, ip0+blksize)\n coords = grids_coords[ip0:ip1]\n weight = grids_weights[ip0:ip1]\n non0 = non0tab[ip0//BLKSIZE:]\n ao_k1 = ao_k2 = self.eval_ao(cell, coords, kpts, deriv=deriv, non0tab=non0)\n if kpts_band is not None:\n ao_k1 = self.eval_ao(cell, coords, kpts_band, deriv=deriv, non0tab=non0)\n yield ao_k1, ao_k2, non0, weight, coords\n ao_k1 = ao_k2 = None\n\n def _gen_rho_evaluator(self, cell, dms, hermi=0):\n if getattr(dms, 'mo_coeff', None) is not None:\n mo_coeff = dms.mo_coeff\n mo_occ = dms.mo_occ\n if isinstance(dms[0], numpy.ndarray) and dms[0].ndim == 2:\n mo_coeff = [mo_coeff]\n mo_occ = [mo_occ]\n nao = cell.nao_nr()\n ndms = len(mo_occ)\n def make_rho(idm, ao, non0tab, xctype):\n return self.eval_rho2(cell, ao, mo_coeff[idm], mo_occ[idm],\n non0tab, xctype)\n else:\n if isinstance(dms[0], numpy.ndarray) and dms[0].ndim == 2:\n dms = [numpy.stack(dms)]\n #if not hermi:\n # Density (or response of density) is always real for DFT.\n # Symmetrizing DM for gamma point should not change the value of\n # density. However, when k-point is considered, unless dm and\n # dm.conj().transpose produce the same real part of density, the\n # symmetrization code below may be incorrect (proof is needed).\n # # dm.shape = (nkpts, nao, nao)\n # dms = [(dm+dm.conj().transpose(0,2,1))*.5 for dm in dms]\n nao = dms[0].shape[-1]\n ndms = len(dms)\n def make_rho(idm, ao_kpts, non0tab, xctype):\n return self.eval_rho(cell, ao_kpts, dms[idm], non0tab, xctype,\n hermi=hermi)\n return make_rho, ndms, nao\n\n nr_rks_fxc = nr_rks_fxc\n nr_uks_fxc = nr_uks_fxc\n cache_xc_kernel = cache_xc_kernel\n get_rho = get_rho\n\n def rsh_and_hybrid_coeff(self, xc_code, spin=0):\n omega, alpha, hyb = numint.NumInt.rsh_and_hybrid_coeff(self, xc_code, spin)\n if abs(omega) > 1e-10:\n raise NotImplementedError\n return omega, alpha, hyb\n_KNumInt = KNumInt\n", "#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\nimport numpy\nfrom pyscf import gto, scf\nfrom pyscf import dft\nfrom pyscf import lib\n\nmol = gto.Mole()\nmol.verbose = 0\nmol.output = None\nmol.atom = 'h 0 0 0; h 1 .5 0; h 0 4 1; h 1 0 .2'\nmol.basis = 'aug-ccpvdz'\nmol.build()\n#dm = scf.RHF(mol).run(conv_tol=1e-14).make_rdm1()\ndm = numpy.load(os.path.realpath(os.path.join(__file__, '..', 'dm_h4.npy')))\nmf = dft.RKS(mol)\nmf.grids.atom_grid = {\"H\": (50, 110)}\nmf.prune = None\nmf.grids.build(with_non0tab=False)\nnao = mol.nao_nr()\nao = dft.numint.eval_ao(mol, mf.grids.coords, deriv=1)\nrho = dft.numint.eval_rho(mol, ao, dm, xctype='GGA')\n\ndef tearDownModule():\n global mol, mf, ao, rho\n del mol, mf, ao, rho\n\ndef finger(a):\n w = numpy.cos(numpy.arange(a.size))\n return numpy.dot(w, a.ravel())\n\nclass KnownValues(unittest.TestCase):\n def test_parse_xc(self):\n hyb, fn_facs = dft.xcfun.parse_xc('.5*HF+.5*B3LYP,VWN*.5')\n self.assertAlmostEqual(hyb[0], .6, 12)\n self.assertEqual([x[0] for x in fn_facs], [0,6,16,3])\n self.assertTrue(numpy.allclose([x[1] for x in fn_facs],\n (0.04, 0.36, 0.405, 0.595)))\n hyb, fn_facs = dft.xcfun.parse_xc('HF,')\n self.assertEqual(hyb[0], 1)\n self.assertEqual(fn_facs, [])\n\n hyb, fn_facs = dft.libxc.parse_xc('B88 - SLATER')\n self.assertEqual(fn_facs, [(106, 1), (1, -1)])\n hyb, fn_facs = dft.libxc.parse_xc('B88 -SLATER*.5')\n self.assertEqual(fn_facs, [(106, 1), (1, -0.5)])\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*B3LYP+0.25*B3LYP')\n self.assertTrue(numpy.allclose(hyb, [.15, 0, 0]))\n hyb = dft.libxc.hybrid_coeff('0.5*B3LYP+0.25*B3LYP')\n self.assertAlmostEqual(hyb, .15, 12)\n\n hyb, fn_facs = dft.xcfun.parse_xc('CAM_B3LYP')\n self.assertTrue(numpy.allclose(hyb, [0.19, 0.65, 0.33]))\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.6*CAM_B3LYP+0.4*B3P86')\n self.assertTrue(numpy.allclose(hyb, [.08+0.19*.6, 0.65*.6, 0.33]))\n self.assertTrue(numpy.allclose(fn_facs,\n [(9, 0.6), (3, 0.19), (16, 0.486), (0, 0.032), (6, 0.288), (46, 0.324)]))\n rsh = dft.xcfun.rsh_coeff('0.6*CAM_B3LYP+0.4*B3P86')\n self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.4*B3P86+0.6*CAM_B3LYP')\n self.assertTrue(numpy.allclose(hyb, [.08+0.19*.6, 0.65*.6, 0.33]))\n self.assertTrue(numpy.allclose(fn_facs,\n [(0, 0.032), (6, 0.288), (46, 0.324), (3, 0.19), (9, 0.6), (16, 0.486)]))\n rsh = dft.xcfun.rsh_coeff('0.4*B3P86+0.6*CAM_B3LYP')\n self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF(0.3) + .8*HF + .22*LR_HF')\n self.assertEqual(hyb, [1.3, 1.02, 0.3])\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF + .22*LR_HF(0.3) + .8*HF')\n self.assertEqual(hyb, [1.3, 1.02, 0.3])\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF + .8*HF + .22*LR_HF(0.3)')\n self.assertEqual(hyb, [1.3, 1.02, 0.3])\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*RSH(2.04;0.56;0.3) + 0.5*BP86')\n self.assertEqual(hyb, [1.3, 1.02, 0.3])\n 
self.assertEqual(fn_facs, [(6, 0.5), (46, 0.5)])\n\n self.assertRaises(ValueError, dft.xcfun.parse_xc, 'SR_HF(0.3) + LR_HF(.5)')\n self.assertRaises(ValueError, dft.xcfun.parse_xc, 'LR-HF(0.3) + SR-HF(.5)')\n\n hyb = dft.xcfun.hybrid_coeff('M05')\n self.assertAlmostEqual(hyb, 0.28, 9)\n\n hyb, fn_facs = dft.xcfun.parse_xc('APBE,')\n self.assertEqual(fn_facs[0][0], 58)\n\n hyb, fn_facs = dft.xcfun.parse_xc('VWN,')\n self.assertEqual(fn_facs, [(3, 1)])\n\n hyb, fn_facs = dft.xcfun.parse_xc('TF,')\n self.assertEqual(fn_facs, [(24, 1)])\n\n ref = [(0, 1), (3, 1)]\n self.assertEqual(dft.xcfun.parse_xc_name('LDA,VWN'), (0,3))\n self.assertEqual(dft.xcfun.parse_xc(('LDA','VWN'))[1], ref)\n self.assertEqual(dft.xcfun.parse_xc((0, 3))[1], ref)\n self.assertEqual(dft.xcfun.parse_xc('0, 3')[1], ref)\n self.assertEqual(dft.xcfun.parse_xc(3)[1], [(3,1)])\n\n #self.assertEqual(dft.xcfun.parse_xc('M11-L')[1], [(226,1),(75,1)])\n #self.assertEqual(dft.xcfun.parse_xc('M11L' )[1], [(226,1),(75,1)])\n #self.assertEqual(dft.xcfun.parse_xc('M11-L,M11L' )[1], [(226,1),(75,1)])\n #self.assertEqual(dft.xcfun.parse_xc('M11_L,M11-L')[1], [(226,1),(75,1)])\n #self.assertEqual(dft.xcfun.parse_xc('M11L,M11_L' )[1], [(226,1),(75,1)])\n\n #self.assertEqual(dft.xcfun.parse_xc('Xpbe,')[1], [(123,1)])\n #self.assertEqual(dft.xcfun.parse_xc('pbe,' )[1], [(101,1)])\n hyb, fn_facs = dft.xcfun.parse_xc('PBE*.4+LDA')\n self.assertEqual(fn_facs, [(5, 0.4), (4, 0.4), (0, 1)])\n hyb, fn_facs = dft.xcfun.parse_xc('PBE*.4+VWN')\n self.assertEqual(fn_facs, [(5, 0.4), (4, 0.4), (3, 1)])\n\n self.assertTrue (dft.xcfun.is_meta_gga('m05'))\n self.assertFalse(dft.xcfun.is_meta_gga('pbe0'))\n self.assertFalse(dft.xcfun.is_meta_gga('tf,'))\n self.assertFalse(dft.xcfun.is_meta_gga('vv10'))\n self.assertTrue (dft.xcfun.is_gga('PBE0'))\n self.assertFalse(dft.xcfun.is_gga('m05'))\n self.assertFalse(dft.xcfun.is_gga('tf,'))\n self.assertTrue (dft.xcfun.is_lda('tf,'))\n self.assertFalse(dft.xcfun.is_lda('vv10'))\n self.assertTrue (dft.xcfun.is_hybrid_xc('m05'))\n self.assertTrue (dft.xcfun.is_hybrid_xc('pbe0,'))\n self.assertFalse(dft.xcfun.is_hybrid_xc('m05,'))\n self.assertFalse(dft.xcfun.is_hybrid_xc('vv10'))\n self.assertTrue (dft.xcfun.is_hybrid_xc(('b3lyp',4,'vv10')))\n\n def test_nlc_coeff(self):\n self.assertEqual(dft.xcfun.nlc_coeff('vv10'), [5.9, 0.0093])\n\n def test_lda(self):\n e,v,f,k = dft.xcfun.eval_xc('lda,', rho[0][:3], deriv=3)\n self.assertAlmostEqual(lib.finger(e) , -0.4720562542635522, 8)\n self.assertAlmostEqual(lib.finger(v[0]), -0.6294083390180697, 8)\n self.assertAlmostEqual(lib.finger(f[0]), -1.1414693830969338, 8)\n self.assertAlmostEqual(lib.finger(k[0]), 4.1402447248393921, 8)\n\n e,v,f,k = dft.xcfun.eval_xc('lda,', [rho[0][:3]*.5]*2, spin=1, deriv=3)\n self.assertAlmostEqual(lib.finger(e) , -0.4720562542635522, 8)\n self.assertAlmostEqual(lib.finger(v[0].T[0]), -0.6294083390180697, 8)\n self.assertAlmostEqual(lib.finger(v[0].T[1]), -0.6294083390180697, 8)\n self.assertAlmostEqual(lib.finger(f[0].T[0]), -1.1414693830969338*2, 8)\n self.assertAlmostEqual(lib.finger(f[0].T[2]), -1.1414693830969338*2, 8)\n self.assertAlmostEqual(lib.finger(k[0].T[0]), 4.1402447248393921*4, 7)\n self.assertAlmostEqual(lib.finger(k[0].T[3]), 4.1402447248393921*4, 7)\n\n def test_lyp(self):\n e,v,f = dft.xcfun.eval_xc(',LYP', rho, deriv=3)[:3]\n self.assertAlmostEqual(numpy.dot(rho[0],e), -62.114576182676615, 8)\n self.assertAlmostEqual(numpy.dot(rho[0],v[0]),-81.771670866308455, 8)\n self.assertAlmostEqual(numpy.dot(rho[0],v[1]), 
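# --- Illustrative usage sketch (assumed shapes; not part of the tests above) ---
# eval_xc returns the 4-tuple (exc, vxc, fxc, kxc); entries beyond the requested
# `deriv` order come back as None.  For a closed-shell LDA density vector:
#
#   rho = numpy.abs(numpy.random.rand(100))                  # density only (LDA input)
#   exc, vxc, fxc, kxc = dft.xcfun.eval_xc('lda,', rho, spin=0, deriv=1)
#   # exc and vxc[0] have one value per grid point; fxc and kxc are None here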
27.485383255125743, 8)\n self.assertAlmostEqual(numpy.dot(rho[0],f[0]), 186.823806251777, 7)\n self.assertAlmostEqual(numpy.dot(rho[0],f[1]), -3391.2428894571085, 6)\n self.assertAlmostEqual(numpy.dot(rho[0],f[2]), 0, 9)\n\n def test_beckex(self):\n rho =(numpy.array([1. , 1., 0., 0.]).reshape(-1,1),\n numpy.array([ .8, 1., 0., 0.]).reshape(-1,1))\n e,v,f = dft.xcfun.eval_xc('b88,', rho, spin=1, deriv=3)[:3]\n self.assertAlmostEqual(lib.finger(e) ,-0.9061911523772116 , 9)\n self.assertAlmostEqual(lib.finger(v[0]),-1.8531364353196298 , 9)\n self.assertAlmostEqual(lib.finger(v[1]),-0.0018308066137967724, 9)\n self.assertAlmostEqual(lib.finger(f[0]),-0.21602284426026866 , 9)\n self.assertAlmostEqual(lib.finger(f[1]), 0.0072053520662545617, 9)\n self.assertAlmostEqual(lib.finger(f[2]), 0.0002275350850255538, 9)\n\n def test_m05x(self):\n rho =(numpy.array([1., 1., 0., 0., 0., 0.165 ]).reshape(-1,1),\n numpy.array([.8, 1., 0., 0., 0., 0.1050]).reshape(-1,1))\n test_ref = numpy.array([-1.57876583, -2.12127045,-2.11264351,-0.00315462,\n 0.00000000, -0.00444560, 3.45640232, 4.4349756])\n exc, vxc, fxc, kxc = dft.xcfun.eval_xc('m05,', rho, 1, deriv=3)\n self.assertAlmostEqual(float(exc)*1.8, test_ref[0], 5)\n self.assertAlmostEqual(abs(vxc[0]-test_ref[1:3]).max(), 0, 6)\n self.assertAlmostEqual(abs(vxc[1]-test_ref[3:6]).max(), 0, 6)\n self.assertAlmostEqual(abs(vxc[3]-test_ref[6:8]).max(), 0, 5)\n\n exc, vxc, fxc, kxc = dft.xcfun.eval_xc('m05,', rho[0], 0, deriv=3)\n self.assertAlmostEqual(float(exc), -0.5746231988116002, 5)\n self.assertAlmostEqual(float(vxc[0]), -0.8806121005703862, 6)\n self.assertAlmostEqual(float(vxc[1]), -0.0032300155406846756, 7)\n self.assertAlmostEqual(float(vxc[3]), 0.4474953100487698, 5)\n\n def test_camb3lyp(self):\n rho = numpy.array([1., 1., 0.1, 0.1]).reshape(-1,1)\n exc, vxc, fxc, kxc = dft.xcfun.eval_xc('camb3lyp', rho, 0, deriv=1)\n # FIXME, xcfun and libxc do not agree on camb3lyp\n # self.assertAlmostEqual(float(exc), -0.5752559666317147, 5)\n # self.assertAlmostEqual(float(vxc[0]), -0.7709812578936763, 5)\n # self.assertAlmostEqual(float(vxc[1]), -0.0029862221286189846, 7)\n\n self.assertEqual(dft.xcfun.rsh_coeff('camb3lyp'), (0.33, 0.65, -0.46))\n\n def test_define_xc(self):\n def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):\n exc = vxc = fxc = kxc = None\n return exc, vxc, fxc, kxc\n\n mf = dft.RKS(mol)\n ni = dft.xcfun.define_xc(mf._numint, eval_xc, 'GGA', hyb=0.2)\n ni = dft.xcfun.define_xc(mf._numint, 'b3lyp+vwn', 'GGA', hyb=0.2)\n self.assertRaises(ValueError, dft.xcfun.define_xc, mf._numint, 0.1)\n\n def test_vs_libxc_rks(self):\n ao = dft.numint.eval_ao(mol, mf.grids.coords[:200], deriv=2)\n rho = dft.numint.eval_rho(mol, ao, dm, xctype='MGGA')\n rhoa = rho[:,:200]\n def check(xc_code, deriv=3, e_place=9, v_place=9, f_place=9, k_place=9):\n exc0, vxc0, fxc0, kxc0 = dft.libxc.eval_xc(xc_code, rhoa, 0, deriv=deriv)\n exc1, vxc1, fxc1, kxc1 = dft.xcfun.eval_xc(xc_code, rhoa, 0, deriv=deriv)\n self.assertAlmostEqual(abs(exc0-exc1).max(), 0, e_place)\n if deriv > 0:\n for v0, v1 in zip(vxc0, vxc1):\n if v0 is not None and v1 is not None:\n self.assertAlmostEqual(abs(v0-v1).max(), 0, v_place)\n if deriv > 1:\n for f0, f1 in zip(fxc0, fxc1):\n if f0 is not None and f1 is not None:\n self.assertAlmostEqual(abs(f0-f1).max(), 0, f_place)\n if deriv > 2:\n for k0, k1 in zip(kxc0, kxc1):\n if k0 is not None and k1 is not None:\n self.assertAlmostEqual(abs(k0-k1).max(), 0, k_place)\n\n check('lda,')\n\n check('pw86,')\n check('pbe,', 
e_place=6, v_place=6, f_place=5, k_place=4)\n #?check('becke,')\n #?check('br,')\n #?check('LDAERF,')\n check('optx,')\n check('OPTXCORR,')\n check('RPBE,')\n check('TF,' )\n check('PW91,' , e_place=6, v_place=4, f_place=2, k_place=-1)\n check('m05,' , deriv=1, e_place=6, v_place=6)\n check('m052x,', deriv=1, e_place=6, v_place=6)\n check('m06,' , deriv=1, e_place=6, v_place=6)\n check('m062x,', deriv=1, e_place=6, v_place=6)\n check('m06l,' , deriv=1, e_place=6, v_place=6)\n check('TPSS,' , k_place=-4)\n #?check('REVTPSS,', deriv=1) # xcfun crash\n check('APBE,')\n check('BLOC,' , k_place=-5)\n check('PBEINT,', e_place=7, v_place=6, f_place=5, k_place=4)\n\n check(',vwn3')\n check(',vwn5')\n check(',pbe' , deriv=2)\n #?check(',br')\n #?check(',LDAERF')\n check(',lyp' , deriv=2)\n check(',SPBE' , deriv=2, e_place=1, v_place=1, f_place=0)\n check(',PW91' , deriv=2, f_place=3)\n check(',m052x', deriv=1)\n check(',m05' , deriv=1)\n check(',m06' , deriv=1)\n check(',m062x', deriv=1)\n check(',m06l' , deriv=1)\n check(',TPSS' , deriv=1, v_place=1)\n check(',REVTPSS', deriv=1, e_place=2, v_place=1)\n check(',p86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check(',APBE' , deriv=2)\n check(',PBEINT' , deriv=1)\n check(',TPSSLOC', deriv=1, e_place=1, v_place=0)\n\n #?check('br')\n check('revpbe', deriv=2, e_place=6, v_place=6, f_place=5)\n check('b97' , deriv=2, e_place=6, v_place=5, f_place=3)\n #?check('b97_1')\n #?check('b97_2')\n check('SVWN')\n check('BLYP' , deriv=2)\n check('BP86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('OLYP' , deriv=2)\n check('KT1' , deriv=1)\n check('KT2' , deriv=1)\n #?check('KT3')\n check('PBE0' , deriv=2, e_place=6, v_place=6, f_place=5)\n check('B3P86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('B3P86G' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('B3PW91' , deriv=2, f_place=4)\n check('B3PW91G', deriv=2, e_place=2, v_place=2, f_place=2)\n check('B3LYP' , deriv=2)\n check('B3LYP5' , deriv=2)\n check('B3LYPG' , deriv=2)\n check('O3LYP' , deriv=2)\n check('X3LYP' , deriv=2, e_place=7, v_place=5, f_place=2)\n check('CAMB3LYP', deriv=1)\n check('B97_1' , deriv=2, e_place=6, v_place=5, f_place=3)\n check('B97_2' , deriv=2, e_place=6, v_place=5, f_place=3)\n check('TPSSH' , deriv=1, v_place=1)\n\n def test_vs_libxc_uks(self):\n ao = dft.numint.eval_ao(mol, mf.grids.coords[:400], deriv=2)\n rho = dft.numint.eval_rho(mol, ao, dm, xctype='MGGA')\n rhoa = rho[:,:200]\n rhob = rhoa + rho[:,200:400]\n def check(xc_code, deriv=3, e_place=9, v_place=9, f_place=9, k_place=9):\n exc0, vxc0, fxc0, kxc0 = dft.libxc.eval_xc(xc_code, (rhoa, rhob), 1, deriv=deriv)\n exc1, vxc1, fxc1, kxc1 = dft.xcfun.eval_xc(xc_code, (rhoa, rhob), 1, deriv=deriv)\n self.assertAlmostEqual(abs(exc0-exc1).max(), 0, e_place)\n if deriv > 0:\n for v0, v1 in zip(vxc0, vxc1):\n if v0 is not None and v1 is not None:\n self.assertAlmostEqual(abs(v0-v1).max(), 0, v_place)\n if deriv > 1:\n for f0, f1 in zip(fxc0, fxc1):\n if f0 is not None and f1 is not None:\n self.assertAlmostEqual(abs(f0-f1).max(), 0, f_place)\n if deriv > 2 and kxc0 is not None:\n for k0, k1 in zip(kxc0, kxc1):\n if k0 is not None and k1 is not None:\n self.assertAlmostEqual(abs(k0-k1).max(), 0, k_place)\n\n check('lda,')\n\n check('pw86,')\n check('pbe,', e_place=6, v_place=6, f_place=5, k_place=4)\n #?check('becke,')\n #?check('br,')\n #?check('LDAERF,')\n check('optx,')\n check('OPTXCORR,')\n check('RPBE,')\n check('TF,' , e_place=0, v_place=-1, f_place=-2, k_place=-2)\n check('PW91,' , e_place=6, v_place=4, 
f_place=2, k_place=-1)\n check('m05,' , deriv=1, e_place=6, v_place=6)\n check('m052x,', deriv=1, e_place=6, v_place=6)\n check('m06,' , deriv=1, e_place=6, v_place=6)\n check('m062x,', deriv=1, e_place=6, v_place=6)\n check('m06l,' , deriv=1, e_place=6, v_place=6)\n check('TPSS,' , k_place=-4)\n #?check('REVTPSS,', deriv=1) # libxc crash\n check('APBE,')\n check('BLOC,' , k_place=-5)\n check('PBEINT,', e_place=7, v_place=6, f_place=5, k_place=4)\n\n check(',vwn3', e_place=2, v_place=1, f_place=1, k_place=0)\n check(',vwn5')\n check(',pbe' , deriv=2)\n #?check(',br')\n #?check(',LDAERF')\n check(',lyp' , deriv=2)\n check(',SPBE' , deriv=2, e_place=1, v_place=1, f_place=0)\n check(',PW91' , deriv=2, f_place=3)\n check(',m052x', deriv=1)\n check(',m05' , deriv=1)\n check(',m06' , deriv=1)\n check(',m062x', deriv=1)\n check(',m06l' , deriv=1)\n check(',TPSS' , deriv=1, v_place=1)\n check(',REVTPSS', deriv=1, e_place=2, v_place=1)\n check(',p86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check(',APBE' , deriv=2)\n check(',PBEINT' , deriv=1)\n check(',TPSSLOC', deriv=1, e_place=1, v_place=0)\n\n #?check('br')\n check('revpbe', deriv=2, e_place=6, v_place=6, f_place=5)\n check('b97' , deriv=2, e_place=6, v_place=5, f_place=3)\n #?check('b97_1')\n #?check('b97_2')\n check('SVWN')\n check('BLYP' , deriv=2)\n check('BP86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('OLYP' , deriv=2)\n check('KT1' , deriv=1)\n check('KT2' , deriv=1)\n #?check('KT3')\n check('PBE0' , deriv=2, e_place=6, v_place=6, f_place=5)\n check('B3P86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('B3P86G' , deriv=2, e_place=3, v_place=2, f_place=2)\n check('B3PW91' , deriv=2, f_place=4)\n check('B3PW91G', deriv=2, e_place=2, v_place=2, f_place=2)\n check('B3LYP' , deriv=2)\n check('B3LYP5' , deriv=2)\n check('B3LYPG' , deriv=2, e_place=3, v_place=2, f_place=2)\n check('O3LYP' , deriv=2, e_place=3, v_place=2, f_place=1)\n check('X3LYP' , deriv=2, e_place=7, v_place=5, f_place=2)\n check('CAMB3LYP', deriv=1)\n check('B97_1' , deriv=2, e_place=6, v_place=5, f_place=3)\n check('B97_2' , deriv=2, e_place=6, v_place=5, f_place=3)\n check('TPSSH' , deriv=1, v_place=1)\n\n\nif __name__ == \"__main__\":\n print(\"Test xcfun\")\n unittest.main()\n\n", "#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nNon-relativistic unrestricted Hartree-Fock g-tensor\n\nRefs:\n JPC, 101, 3388\n JCP, 115, 11080\n JCP, 119, 10489\nNote g-tensor = 1/muB d^2 E/ dB dS\nIn some literature, muB is not explicitly presented in the perturbation formula.\n'''\n\nimport time\nfrom functools import reduce\nimport copy\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.scf import _vhf\nfrom pyscf.prop.nmr import rhf as rhf_nmr\nfrom pyscf.prop.nmr import uhf as uhf_nmr\nfrom pyscf.prop.zfs.uhf import koseki_charge\nfrom pyscf.data import nist\n\ndef dia(gobj, dm0, gauge_orig=None):\n '''Note the side effects of set_common_origin'''\n\n if isinstance(dm0, numpy.ndarray) and dm0.ndim == 2: # RHF DM\n return numpy.zeros((3,3))\n mol = gobj.mol\n\n dma, dmb = dm0\n spindm = dma - dmb\n effspin = mol.spin * .5\n muB = .5 # Bohr magneton\n alpha2 = nist.ALPHA ** 2\n #Many choices of qed_fac, see JPC, 101, 3388\n #qed_fac = (nist.G_ELECTRON - 1)\n #qed_fac = nist.G_ELECTRON / 2\n qed_fac = 1\n\n# relativistic mass correction (RMC)\n rmc = -numpy.einsum('ij,ji', mol.intor('int1e_kin'), spindm)\n rmc *= qed_fac / effspin * alpha2\n logger.info(gobj, 'RMC = %s', rmc)\n\n assert(not mol.has_ecp())\n# GC(1e)\n if gauge_orig is not None:\n mol.set_common_origin(gauge_orig)\n h11 = 0\n for ia in range(mol.natm):\n mol.set_rinv_origin(mol.atom_coord(ia))\n Z = mol.atom_charge(ia)\n if gobj.so_eff_charge or not gobj.dia_soc2e:\n Z = koseki_charge(Z)\n# GC(1e) = 1/4c^2 Z/(2r_N^3) [vec{r}_N dot r sigma dot B - B dot vec{r}_N r dot sigma]\n# a11part = (B dot) -1/2 frac{\\vec{r}_N}{r_N^3} r (dot sigma)\n if gauge_orig is None:\n h11 += Z * mol.intor('int1e_giao_a11part', 9)\n else:\n h11 += Z * mol.intor('int1e_cg_a11part', 9)\n trh11 = h11[0] + h11[4] + h11[8]\n h11[0] -= trh11\n h11[4] -= trh11\n h11[8] -= trh11\n if gauge_orig is None:\n for ia in range(mol.natm):\n mol.set_rinv_origin(mol.atom_coord(ia))\n Z = mol.atom_charge(ia)\n if gobj.so_eff_charge or not gobj.dia_soc2e:\n Z = koseki_charge(Z)\n h11 += Z * mol.intor('int1e_a01gp', 9)\n gc1e = numpy.einsum('xij,ji->x', h11, spindm).reshape(3,3)\n if gobj.mb: # correction of order c^{-2} from MB basis\n gc1e += numpy.einsum('ij,ji', mol.intor('int1e_nuc'), spindm) * numpy.eye(3)\n\n gc1e *= (alpha2/4) / effspin / muB\n if gobj.verbose >= logger.INFO:\n _write(gobj, gobj.align(gc1e)[0], 'GC(1e)')\n\n if gobj.dia_soc2e:\n gc2e = gobj.make_dia_gc2e(dm0, gauge_orig, qed_fac)\n if gobj.verbose >= logger.INFO:\n _write(gobj, gobj.align(gc2e)[0], 'GC(2e)')\n else:\n gc2e = 0\n\n gdia = gc1e + gc2e + rmc * numpy.eye(3)\n return gdia\n\ndef make_dia_gc2e(gobj, dm0, gauge_orig, sso_qed_fac=1):\n '''Note the side effects of set_common_origin'''\n\n if (isinstance(gobj.dia_soc2e, str) and\n ('SOMF' in gobj.dia_soc2e.upper() or 'AMFI' in gobj.dia_soc2e.upper())):\n raise NotImplementedError(gobj.dia_soc2e)\n if isinstance(gobj.dia_soc2e, 
str):\n with_sso = 'SSO' in gobj.dia_soc2e.upper()\n with_soo = 'SOO' in gobj.dia_soc2e.upper()\n else:\n with_sso = with_soo = True\n\n mol = gobj.mol\n dma, dmb = dm0\n effspin = mol.spin * .5\n muB = .5 # Bohr magneton\n alpha2 = nist.ALPHA ** 2\n #sso_qed_fac = (nist.G_ELECTRON - 1)\n\n # int2e_ip1v_r1 = (ij|\\frac{\\vec{r}_{12}}{r_{12}^3} \\vec{r}_1|kl)\n if gauge_orig is None:\n intor = mol._add_suffix('int2e_ip1v_r1')\n else:\n mol.set_common_origin(gauge_orig)\n intor = mol._add_suffix('int2e_ip1v_rc1')\n vj, vk = _vhf.direct_mapdm(intor,\n 's2kl', ('lk->s1ij', 'jk->s1il'),\n (dma, dmb), 9,\n mol._atm, mol._bas, mol._env)\n ek = numpy.einsum('xil,li->x', vk[0], dma)\n ek-= numpy.einsum('xil,li->x', vk[1], dmb)\n ek = ek.reshape(3,3)\n gc2e = 0\n if with_sso:\n # spin-density should be contracted to electron 1 (associated to operator r1)\n ej = numpy.einsum('xij,ji->x', vj[0]+vj[1], dma-dmb).reshape(3,3)\n gc2e += sso_qed_fac * (ej - ek)\n if with_soo:\n # spin-density should be contracted to electron 2\n ej = numpy.einsum('xij,ji->x', vj[0]-vj[1], dma+dmb).reshape(3,3)\n gc2e += 2 * (ej - ek)\n gc2e -= numpy.eye(3) * gc2e.trace()\n gc2e *= (alpha2/8) / effspin / muB\n\n # ([GIAO-i j] + [i GIAO-j]|\\frac{\\vec{r}_{12}}{r_{12}^3} x p1|kl)\n # + (ij|\\frac{\\vec{r}_{12}}{r_{12}^3} x p1|[GIAO-k l] + [k GIAO-l])\n if gauge_orig is None:\n nao = dma.shape[0]\n vj, vk = _vhf.direct_mapdm(mol._add_suffix('int2e_ipvg1_xp1'),\n 's2kl', ('lk->s1ij', 'jk->s1il'),\n (dma, dmb), 9,\n mol._atm, mol._bas, mol._env)\n vk1 = _vhf.direct_mapdm(mol._add_suffix('int2e_ipvg2_xp1'),\n 'aa4', 'jk->s1il',\n (dma, dmb), 9,\n mol._atm, mol._bas, mol._env)\n vj = vj.reshape(2,3,3,nao,nao)\n vk = vk.reshape(2,3,3,nao,nao)\n vk += vk1.reshape(2,3,3,nao,nao).transpose(0,2,1,3,4)\n ek = numpy.einsum('xyij,ji->xy', vk[0], dma)\n ek-= numpy.einsum('xyij,ji->xy', vk[1], dmb)\n dia_giao = 0\n if with_sso:\n ej = numpy.einsum('xyij,ji->xy', vj[0]+vj[1], dma-dmb)\n dia_giao += sso_qed_fac * (ej - ek)\n if with_soo:\n ej = numpy.einsum('xyij,ji->xy', vj[0]-vj[1], dma+dmb)\n dia_giao += 2 * (ej - ek)\n gc2e -= dia_giao * (alpha2/4) / effspin / muB\n\n if gobj.mb: # correction of order c^{-2} from MB basis\n vj, vk = gobj._scf.get_jk(mol, dm0)\n vhf = vj[0] + vj[1] - vk\n gc_mb = numpy.einsum('ij,ji', vhf[0], dma)\n gc_mb-= numpy.einsum('ij,ji', vhf[1], dmb)\n gc2e += gc_mb * (alpha2/4) / effspin / muB * numpy.eye(3)\n\n return gc2e\n\n\n# Note mo10 is the imaginary part of MO^1\ndef para(gobj, mo10, mo_coeff, mo_occ, qed_fac=1):\n mol = gobj.mol\n effspin = mol.spin * .5\n muB = .5 # Bohr magneton\n #qed_fac = (nist.G_ELECTRON - 1)\n #qed_fac = nist.G_ELECTRON / 2\n\n orboa = mo_coeff[0][:,mo_occ[0]>0]\n orbob = mo_coeff[1][:,mo_occ[1]>0]\n dm0a = numpy.dot(orboa, orboa.T)\n dm0b = numpy.dot(orbob, orbob.T)\n dm10a = [reduce(numpy.dot, (mo_coeff[0], x, orboa.T)) for x in mo10[0]]\n dm10b = [reduce(numpy.dot, (mo_coeff[1], x, orbob.T)) for x in mo10[1]]\n dm10a = numpy.asarray([x-x.T for x in dm10a])\n dm10b = numpy.asarray([x-x.T for x in dm10b])\n\n hso1e = make_h01_soc1e(gobj, mo_coeff, mo_occ, qed_fac)\n gpara1e =-numpy.einsum('xji,yij->xy', dm10a, hso1e)\n gpara1e+= numpy.einsum('xji,yij->xy', dm10b, hso1e)\n gpara1e *= 1. 
/ effspin / muB\n if gobj.verbose >= logger.INFO:\n _write(gobj, gobj.align(gpara1e)[0], 'SOC(1e)/OZ')\n\n gpara2e = gobj.make_para_soc2e((dm0a,dm0b), (dm10a,dm10b), qed_fac)\n gpara = gpara1e + gpara2e\n return gpara\n\ndef make_para_soc2e(gobj, dm0, dm10, sso_qed_fac=1):\n if isinstance(gobj.para_soc2e, str):\n with_sso = 'SSO' in gobj.para_soc2e.upper()\n with_soo = 'SOO' in gobj.para_soc2e.upper()\n with_somf = 'SOMF' in gobj.para_soc2e.upper()\n with_amfi = 'AMFI' in gobj.para_soc2e.upper()\n assert(not (with_somf and (with_sso or with_soo)))\n elif gobj.para_soc2e:\n with_sso = with_soo = True\n with_somf = with_amfi = False\n else:\n return 0\n\n mol = gobj.mol\n alpha2 = nist.ALPHA ** 2\n effspin = mol.spin * .5\n muB = .5 # Bohr magneton\n #sso_qed_fac = (nist.G_ELECTRON - 1)\n #sso_qed_fac = nist.G_ELECTRON / 2\n\n dm10a, dm10b = dm10\n if with_amfi:\n vj, vk = get_jk_amfi(mol, dm0)\n else:\n vj, vk = get_jk(mol, dm0)\n\n gpara2e = 0\n if with_sso or with_soo:\n ek = numpy.einsum('yil,xli->xy', vk[0], dm10a)\n ek -= numpy.einsum('yil,xli->xy', vk[1], dm10b)\n if with_sso:\n ej = numpy.einsum('yij,xji->xy', vj[0]+vj[1], dm10a-dm10b)\n# ~ <H^{01},MO^1> = - Tr(Im[H^{01}],Im[MO^1])\n gpara2e -= sso_qed_fac * (ej - ek)\n if with_soo:\n ej = numpy.einsum('yij,xji->xy', vj[0]-vj[1], dm10a+dm10b)\n gpara2e -= 2 * (ej - ek)\n else: # SOMF, see JCP 122, 034107 Eq (19)\n ej = numpy.einsum('yij,xji->xy', vj[0]+vj[1], dm10a-dm10b)\n ek = numpy.einsum('yil,xli->xy', vk[0]+vk[1], dm10a-dm10b)\n gpara2e -= ej - 1.5 * ek\n gpara2e *= (alpha2/4) / effspin / muB\n if gobj.verbose >= logger.INFO:\n _write(gobj, gobj.align(gpara2e)[0], 'SOC(2e)/OZ')\n return gpara2e\n\n\ndef para_for_debug(gobj, mo10, mo_coeff, mo_occ, qed_fac=1):\n mol = gobj.mol\n effspin = mol.spin * .5\n muB = .5 # Bohr magneton\n orboa = mo_coeff[0][:,mo_occ[0]>0]\n orbob = mo_coeff[1][:,mo_occ[1]>0]\n dm10a = [reduce(numpy.dot, (mo_coeff[0], x, orboa.T)) for x in mo10[0]]\n dm10b = [reduce(numpy.dot, (mo_coeff[1], x, orbob.T)) for x in mo10[1]]\n dm10a = numpy.asarray([x-x.T for x in dm10a])\n dm10b = numpy.asarray([x-x.T for x in dm10b])\n\n # <H^{01},MO^1> = - Tr(Im[H^{01}],Im[MO^1])\n hso1e = make_h01_soc1e(gobj, mo_coeff, mo_occ, qed_fac)\n gpara1e =-numpy.einsum('xji,yij->xy', dm10a, hso1e)\n gpara1e+= numpy.einsum('xji,yij->xy', dm10b, hso1e)\n gpara1e *= 1./effspin / muB\n if gobj.verbose >= logger.INFO:\n _write(gobj, gobj.align(gpara1e)[0], 'SOC(1e)/OZ')\n\n if gobj.para_soc2e:\n h1aa, h1bb = make_h01_soc2e(gobj, mo_coeff, mo_occ, qed_fac)\n gpara2e =-numpy.einsum('xji,yij->xy', dm10a, h1aa)\n gpara2e-= numpy.einsum('xji,yij->xy', dm10b, h1bb)\n gpara2e *= 1./effspin / muB\n if gobj.verbose >= logger.INFO:\n _write(gobj, gobj.align(gpara2e)[0], 'SOC(2e)/OZ')\n else:\n gpara2e = 0\n gpara = gpara1e + gpara2e\n return gpara\n\n\ndef make_h01_soc1e(gobj, mo_coeff, mo_occ, qed_fac=1):\n mol = gobj.mol\n assert(not mol.has_ecp())\n alpha2 = nist.ALPHA ** 2\n #qed_fac = (nist.G_ELECTRON - 1)\n\n# hso1e is the imaginary part of [i sigma dot pV x p]\n# JCP, 122, 034107 Eq (2) = 1/4c^2 hso1e\n if gobj.so_eff_charge:\n hso1e = 0\n for ia in range(mol.natm):\n Z = koseki_charge(mol.atom_charge(ia))\n mol.set_rinv_origin(mol.atom_coord(ia))\n hso1e += -Z * mol.intor_asymmetric('int1e_prinvxp', 3)\n else:\n hso1e = mol.intor_asymmetric('int1e_pnucxp', 3)\n hso1e *= qed_fac * (alpha2/4)\n return hso1e\n\ndef get_jk(mol, dm0):\n # K_{pq} = (pi|iq) + (iq|pi)\n vj, vk, vk1 = _vhf.direct_mapdm(mol._add_suffix('int2e_p1vxp1'),\n 'a4ij', 
('lk->s2ij', 'jk->s1il', 'li->s1kj'),\n dm0, 3, mol._atm, mol._bas, mol._env)\n for i in range(3):\n lib.hermi_triu(vj[0,i], hermi=2, inplace=True)\n lib.hermi_triu(vj[1,i], hermi=2, inplace=True)\n vk += vk1\n return vj, vk\n\ndef get_jk_amfi(mol, dm0):\n '''Atomic-mean-field approximation'''\n dma, dmb = dm0\n ao_loc = mol.ao_loc_nr()\n nao = ao_loc[-1]\n vj = numpy.zeros((2,3,nao,nao))\n vk = numpy.zeros((2,3,nao,nao))\n atom = copy.copy(mol)\n aoslice = mol.aoslice_by_atom(ao_loc)\n for ia in range(mol.natm):\n symb = mol.atom_symbol(ia)\n b0, b1, p0, p1 = aoslice[ia]\n atom._bas = mol._bas[b0:b1]\n vj1, vk1 = get_jk(atom, (dma[p0:p1,p0:p1],dmb[p0:p1,p0:p1]))\n vj[:,:,p0:p1,p0:p1] = vj1\n vk[:,:,p0:p1,p0:p1] = vk1\n return vj, vk\n\ndef get_j(mol, dm0):\n vj = _vhf.direct_mapdm(mol._add_suffix('int2e_p1vxp1'),\n 'a4ij', 'lk->s2ij',\n dm0, 3, mol._atm, mol._bas, mol._env)\n for i in range(3):\n lib.hermi_triu(vj[0,i], hermi=2, inplace=True)\n lib.hermi_triu(vj[1,i], hermi=2, inplace=True)\n return vj\n\ndef get_j_amfi(mol, dm0):\n '''Atomic-mean-field approximation'''\n return get_jk_amfi(mol, dm0)[0]\n\n\n# hso2e is the imaginary part of SSO\n# SSO term of JCP, 122, 034107 Eq (3) = 1/4c^2 hso2e\ndef make_h01_soc2e(gobj, mo_coeff, mo_occ, sso_qed_fac=1):\n mol = gobj.mol\n alpha2 = nist.ALPHA ** 2\n #sso_qed_fac = (nist.G_ELECTRON - 1)\n\n dm0 = gobj._scf.make_rdm1(mo_coeff, mo_occ)\n vj, vk = get_jk(mol, dm0)\n\n vjaa = 0\n vjbb = 0\n vkaa = 0\n vkbb = 0\n if 'SSO' in gobj.para_soc2e.upper():\n vj1 = vj[0] + vj[1]\n vjaa += vj1 * sso_qed_fac\n vjbb -= vj1 * sso_qed_fac\n vkaa += vk[0] * sso_qed_fac\n vkbb -= vk[1] * sso_qed_fac\n if 'SOO' in gobj.para_soc2e.upper():\n vj1 = vj[0] - vj[1]\n vjaa += vj1 * 2\n vjbb += vj1 * 2\n vkaa += vk[0] * 2\n vkbb -= vk[1] * 2\n haa = (vjaa - vkaa) * (alpha2/4)\n hbb = (vjbb - vkbb) * (alpha2/4)\n return haa, hbb\n\ndef align(gtensor):\n '''Transform the orientation of g-tensor.\n The new orientations are the eigenvector of G matrix (G=g.gT)\n '''\n w, v = numpy.linalg.eigh(numpy.dot(gtensor, gtensor.T))\n idxmax = abs(v).argmax(axis=0)\n v[:,v[idxmax,[0,1,2]]<0] *= -1 # format phase\n sorted_axis = numpy.argsort(idxmax)\n v = v[:,sorted_axis]\n if numpy.linalg.det(v) < 0: # ensure new axes in RHS\n v[:,2] *= -1\n g2 = reduce(numpy.dot, (v.T, gtensor, v))\n return g2, v\n\ndef _write(gobj, gtensor, title):\n gobj.stdout.write('%s %s\\n' % (title, gtensor.diagonal()))\n if gobj.verbose >= logger.DEBUG:\n rhf_nmr._write(gobj.stdout, gtensor, title+' tensor')\n w = numpy.linalg.svd(gtensor)[1]\n gobj.stdout.write('sqrt(ggT) %s\\n' % w)\n\n\nclass GTensor(lib.StreamObject):\n '''dE = B dot gtensor dot s\n\n Attributes:\n dia_soc2e : str or bool\n 2-electron spin-orbit coupling for diamagnetic term. Its value can\n be 'SSO', 'SOO', 'SSO+SOO', None/False or True (='SSO+SOO').\n Default is False.\n para_soc2e : str or bool\n 2-electron spin-orbit coupling for paramagnetic term. Its value\n can be 'SSO', 'SOO', 'SSO+SOO', None/False, True (='SSO+SOO')\n 'SOMF', 'AMFI' (='AMFI+SSO+SOO'), 'SOMF+AMFI', 'AMFI+SSO',\n 'AMFI+SOO', 'AMFI+SSO+SOO'. Default is 'SSO+SOO'.\n koseki_charge : bool\n Whether to use Koseki effective SOC charge in 1-electron\n diamagnetic term and paramagnetic term. 
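# --- Illustrative sketch (made-up numbers; not PySCF code) ---
# align() above re-orients the g-tensor along the principal axes of G = g.g^T;
# the principal g-values are the square roots of the eigenvalues of G (the
# singular values that _write prints).  A minimal NumPy version:
import numpy as np

g = np.eye(3) * 2.0023193 + np.random.rand(3, 3) * 1e-3   # fake g-tensor
w, v = np.linalg.eigh(g.dot(g.T))
g_principal = v.T.dot(g).dot(v)       # g expressed in the principal-axis frame
print(np.sqrt(w))                     # principal g-values, cf. numpy.linalg.svd(g)[1]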
Default is False.\n '''\n def __init__(self, mf):\n self.mol = mf.mol\n self.verbose = mf.mol.verbose\n self.stdout = mf.mol.stdout\n self.chkfile = mf.chkfile\n self._scf = mf\n\n # dia_soc2e is 2-electron spin-orbit coupling for diamagnetic term\n # dia_soc2e can be 'SSO', 'SOO', 'SSO+SOO', None/False, True (='SSO+SOO')\n self.dia_soc2e = False\n # para_soc2e is 2-electron spin-orbit coupling for paramagnetic term\n # para_soc2e can be 'SSO', 'SOO', 'SSO+SOO', None/False, True (='SSO+SOO')\n # 'SOMF', 'AMFI' (='AMFI+SSO+SOO'), 'SOMF+AMFI', 'AMFI+SSO',\n # 'AMFI+SOO', 'AMFI+SSO+SOO'\n self.para_soc2e = 'SSO+SOO'\n # Koseki effective SOC charge\n self.so_eff_charge = False\n\n # corresponding to RMB basis (DO NOT use. It's in testing)\n self.mb = False\n\n# gauge_orig=None will call GIAO. A coordinate array leads to common gauge\n self.gauge_orig = None\n self.cphf = True\n self.max_cycle_cphf = 20\n self.conv_tol = 1e-9\n\n self.mo10 = None\n self.mo_e10 = None\n self._keys = set(self.__dict__.keys())\n\n def dump_flags(self, verbose=None):\n log = logger.new_logger(self, verbose)\n log.info('\\n')\n log.info('******** %s for %s (In testing) ********',\n self.__class__, self._scf.__class__)\n if self.gauge_orig is None:\n log.info('gauge = GIAO')\n else:\n log.info('Common gauge = %s', str(self.gauge_orig))\n log.info('with cphf = %s', self.cphf)\n if self.cphf:\n log.info('CPHF conv_tol = %g', self.conv_tol)\n log.info('CPHF max_cycle_cphf = %d', self.max_cycle_cphf)\n log.info('dia_soc2e = %s', self.dia_soc2e)\n log.info('para_soc2e = %s', self.para_soc2e)\n log.info('so_eff_charge = %s (1e SO effective charge)',\n self.so_eff_charge)\n return self\n\n def kernel(self, mo1=None):\n cput0 = (time.clock(), time.time())\n self.check_sanity()\n self.dump_flags()\n\n gdia = self.dia(self._scf.make_rdm1(), self.gauge_orig)\n gpara = self.para(mo1, self._scf.mo_coeff, self._scf.mo_occ)\n gshift = gpara + gdia\n gtensor = gshift + numpy.eye(3) * nist.G_ELECTRON\n\n logger.timer(self, 'g-tensor', *cput0)\n if self.verbose >= logger.NOTE:\n logger.note(self, 'free electron g %s', nist.G_ELECTRON)\n gtot, v = self.align(gtensor)\n gdia = reduce(numpy.dot, (v.T, gdia, v))\n gpara = reduce(numpy.dot, (v.T, gpara, v))\n gshift = gtot - numpy.eye(3) * nist.G_ELECTRON\n if self.verbose >= logger.INFO:\n _write(self, gdia, 'g-tensor diamagnetic terms')\n _write(self, gpara, 'g-tensor paramagnetic terms')\n _write(self, gtot, 'g-tensor total')\n _write(self, gshift*1e3, 'g-shift (ppt)')\n return gtensor\n\n def dia(self, dm0=None, gauge_orig=None):\n if gauge_orig is None: gauge_orig = self.gauge_orig\n if dm0 is None: dm0 = self._scf.make_rdm1()\n return dia(self, dm0, gauge_orig)\n\n def para(self, mo10=None, mo_coeff=None, mo_occ=None):\n if mo_coeff is None: mo_coeff = self._scf.mo_coeff\n if mo_occ is None: mo_occ = self._scf.mo_occ\n if mo10 is None:\n self.mo10, self.mo_e10 = self.solve_mo1()\n mo10 = self.mo10\n return para(self, mo10, mo_coeff, mo_occ)\n\n make_dia_gc2e = make_dia_gc2e\n make_para_soc2e = make_para_soc2e\n solve_mo1 = uhf_nmr.solve_mo1\n get_fock = uhf_nmr.get_fock\n\n def get_ovlp(self, mol=None, gauge_orig=None):\n if mol is None: mol = self.mol\n if gauge_orig is None: gauge_orig = self.gauge_orig\n return rhf_nmr.get_ovlp(mol, gauge_orig)\n\n def align(self, gtensor):\n return align(gtensor)\n\n\nif __name__ == '__main__':\n from pyscf import gto, scf\n mol = gto.M(atom='C 0 0 0; O 0 0 1.25',\n basis='ccpvdz', spin=1, charge=1, verbose=3)\n mf = 
scf.newton(scf.UHF(mol)).run()\n gobj = GTensor(mf)\n gobj.para_soc2e = 'SSO+SOO'\n gobj.dia_soc2e = None\n gobj.so_eff_charge = False\n gobj.gauge_orig = (0,0,0)\n print(gobj.align(gobj.kernel())[0])\n\n mol = gto.M(atom='''\n H 0 0 1\n ''',\n basis='ccpvdz', spin=1, charge=0, verbose=3)\n mf = scf.UHF(mol).run()\n gobj = GTensor(mf)\n print(gobj.align(gobj.kernel())[0])\n\n mol = gto.M(atom='''\n H 0 0 1\n H 1.2 0 1\n H .1 1.1 0.3\n H .8 .7 .6\n ''',\n basis='ccpvdz', spin=1, charge=1, verbose=3)\n mf = scf.UHF(mol).run()\n gobj = GTensor(mf)\n #print(gobj.kernel())\n gobj.para_soc2e = 'SSO+SOO'\n gobj.dia_soc2e = None\n gobj.so_eff_charge = False\n nao, nmo = mf.mo_coeff[0].shape\n nelec = mol.nelec\n numpy.random.seed(1)\n mo10 =[numpy.random.random((3,nmo,nelec[0])),\n numpy.random.random((3,nmo,nelec[1]))]\n print(lib.finger(para(gobj, mo10, mf.mo_coeff, mf.mo_occ)) - 4.3706065384682997e-05)\n print(lib.finger(para_for_debug(gobj, mo10, mf.mo_coeff, mf.mo_occ)) - 4.3706065384682997e-05)\n numpy.random.seed(1)\n dm0 = numpy.random.random((2,nao,nao))\n dm0 = dm0 + dm0.transpose(0,2,1)\n dm10 = numpy.random.random((2,3,nao,nao))\n dm10 = dm10 - dm10.transpose(0,1,3,2)\n print(lib.finger(make_para_soc2e(gobj, dm0, dm10)) - 0.011068947999868441)\n print(lib.finger(make_dia_gc2e(gobj, dm0, (.5,1,2))) - -0.0058333522256648652)\n print(lib.finger(make_dia_gc2e(gobj, dm0, None)) - 0.0015992772016390443)\n", "#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport sys\nimport copy\nfrom functools import reduce\nimport numpy\nfrom pyscf import lib\nfrom pyscf import gto\nfrom pyscf.lib import logger\nfrom pyscf import fci\nfrom pyscf import scf\nfrom pyscf import symm\nfrom pyscf import __config__\n\nBASE = getattr(__config__, 'mcscf_addons_sort_mo_base', 1)\nMAP2HF_TOL = getattr(__config__, 'mcscf_addons_map2hf_tol', 0.4)\n\nif sys.version_info < (3,):\n RANGE_TYPE = list\nelse:\n RANGE_TYPE = range\n\n\ndef sort_mo(casscf, mo_coeff, caslst, base=BASE):\n '''Pick orbitals for CAS space\n\n Args:\n casscf : an :class:`CASSCF` or :class:`CASCI` object\n\n mo_coeff : ndarray or a list of ndarray\n Orbitals for CASSCF initial guess. In the UHF-CASSCF, it's a list\n of two orbitals, for alpha and beta spin.\n caslst : list of int or nested list of int\n A list of orbital indices to represent the CAS space. 
In the UHF-CASSCF,\n it's consist of two lists, for alpha and beta spin.\n\n Kwargs:\n base : int\n 0-based (C-style) or 1-based (Fortran-style) caslst\n\n Returns:\n An reoreded mo_coeff, which put the orbitals given by caslst in the CAS space\n\n Examples:\n\n >>> from pyscf import gto, scf, mcscf\n >>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz', verbose=0)\n >>> mf = scf.RHF(mol)\n >>> mf.scf()\n >>> mc = mcscf.CASSCF(mf, 4, 4)\n >>> cas_list = [5,6,8,9] # pi orbitals\n >>> mo = mc.sort_mo(cas_list)\n >>> mc.kernel(mo)[0]\n -109.007378939813691\n '''\n ncore = casscf.ncore\n def ext_list(nmo, caslst):\n mask = numpy.ones(nmo, dtype=bool)\n mask[caslst] = False\n idx = numpy.where(mask)[0]\n if len(idx) + casscf.ncas != nmo:\n raise ValueError('Active space size is incompatible with caslist. '\n 'ncas = %d. caslist %s' % (casscf.ncas, caslst))\n return idx\n\n if isinstance(ncore, (int, numpy.integer)):\n nmo = mo_coeff.shape[1]\n if base != 0:\n caslst = [i-base for i in caslst]\n idx = ext_list(nmo, caslst)\n mo = numpy.hstack((mo_coeff[:,idx[:ncore]],\n mo_coeff[:,caslst],\n mo_coeff[:,idx[ncore:]]))\n\n if getattr(mo_coeff, 'orbsym', None) is not None:\n orbsym = mo_coeff.orbsym\n orbsym = numpy.hstack((orbsym[idx[:ncore]], orbsym[caslst],\n orbsym[idx[ncore:]]))\n mo = lib.tag_array(mo, orbsym=orbsym)\n return mo\n\n else: # UHF-based CASSCF\n if isinstance(caslst[0], (int, numpy.integer)):\n if base != 0:\n caslsta = [i-1 for i in caslst]\n caslst = (caslsta, caslsta)\n else: # two casspace lists, for alpha and beta\n if base != 0:\n caslst = ([i-base for i in caslst[0]],\n [i-base for i in caslst[1]])\n nmo = mo_coeff[0].shape[1]\n idxa = ext_list(nmo, caslst[0])\n mo_a = numpy.hstack((mo_coeff[0][:,idxa[:ncore[0]]],\n mo_coeff[0][:,caslst[0]],\n mo_coeff[0][:,idxa[ncore[0]:]]))\n idxb = ext_list(nmo, caslst[1])\n mo_b = numpy.hstack((mo_coeff[1][:,idxb[:ncore[1]]],\n mo_coeff[1][:,caslst[1]],\n mo_coeff[1][:,idxb[ncore[1]:]]))\n\n if getattr(mo_coeff[0], 'orbsym', None) is not None:\n orbsyma, orbsymb = mo_coeff[0].orbsym, mo_coeff[1].orbsym\n orbsyma = numpy.hstack((orbsyma[idxa[:ncore[0]]], orbsyma[caslst[0]],\n orbsyma[idxa[ncore[0]:]]))\n orbsymb = numpy.hstack((orbsymb[idxb[:ncore[1]]], orbsymb[caslst[1]],\n orbsymb[idxb[ncore[1]:]]))\n mo_a = lib.tag_array(mo_a, orbsym=orbsyma)\n mo_b = lib.tag_array(mo_b, orbsym=orbsymb)\n return (mo_a, mo_b)\n\ndef select_mo_by_irrep(casscf, cas_occ_num, mo = None, base=BASE):\n raise RuntimeError('This function has been replaced by function caslst_by_irrep')\n\ndef caslst_by_irrep(casscf, mo_coeff, cas_irrep_nocc,\n cas_irrep_ncore=None, s=None, base=BASE):\n '''Given number of active orbitals for each irrep, return the orbital\n indices of active space\n\n Args:\n casscf : an :class:`CASSCF` or :class:`CASCI` object\n\n cas_irrep_nocc : list or dict\n Number of active orbitals for each irrep. It can be a dict, eg\n {'A1': 2, 'B2': 4} to indicate the active space size based on\n irrep names, or {0: 2, 3: 4} for irrep Id, or a list [2, 0, 0, 4]\n (identical to {0: 2, 3: 4}) in which the list index is served as\n the irrep Id.\n\n Kwargs:\n cas_irrep_ncore : list or dict\n Number of closed shells for each irrep. It can be a dict, eg\n {'A1': 6, 'B2': 4} to indicate the closed shells based on\n irrep names, or {0: 6, 3: 4} for irrep Id, or a list [6, 0, 0, 4]\n (identical to {0: 6, 3: 4}) in which the list index is served as\n the irrep Id. 
If cas_irrep_ncore is not given, the program\n will generate a guess based on the lowest :attr:`CASCI.ncore`\n orbitals.\n s : ndarray\n overlap matrix\n base : int\n 0-based (C-like) or 1-based (Fortran-like) caslst\n\n Returns:\n A list of orbital indices\n\n Examples:\n\n >>> from pyscf import gto, scf, mcscf\n >>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvtz', symmetry=True, verbose=0)\n >>> mf = scf.RHF(mol)\n >>> mf.kernel()\n >>> mc = mcscf.CASSCF(mf, 12, 4)\n >>> mcscf.caslst_by_irrep(mc, mf.mo_coeff, {'E1gx':4, 'E1gy':4, 'E1ux':2, 'E1uy':2})\n [5, 7, 8, 10, 11, 14, 15, 20, 25, 26, 31, 32]\n '''\n mol = casscf.mol\n log = logger.Logger(casscf.stdout, casscf.verbose)\n orbsym = numpy.asarray(scf.hf_symm.get_orbsym(mol, mo_coeff))\n ncore = casscf.ncore\n\n irreps = set(orbsym)\n\n if cas_irrep_ncore is not None:\n irrep_ncore = {}\n for k, v in cas_irrep_ncore.items():\n if isinstance(k, str):\n irrep_ncore[symm.irrep_name2id(mol.groupname, k)] = v\n else:\n irrep_ncore[k] = v\n\n ncore_rest = ncore - sum(irrep_ncore.values())\n if ncore_rest > 0: # guess core configuration\n mask = numpy.ones(len(orbsym), dtype=bool)\n for ir in irrep_ncore:\n mask[orbsym == ir] = False\n core_rest = orbsym[mask][:ncore_rest]\n core_rest = dict([(ir, numpy.count_nonzero(core_rest==ir))\n for ir in set(core_rest)])\n log.info('Given core space %s < casscf core size %d',\n cas_irrep_ncore, ncore)\n log.info('Add %s to core configuration', core_rest)\n irrep_ncore.update(core_rest)\n elif ncore_rest < 0:\n raise ValueError('Given core space %s > casscf core size %d'\n % (cas_irrep_ncore, ncore))\n else:\n irrep_ncore = dict([(ir, sum(orbsym[:ncore]==ir)) for ir in irreps])\n\n if not isinstance(cas_irrep_nocc, dict):\n # list => dict\n cas_irrep_nocc = dict([(ir, n) for ir,n in enumerate(cas_irrep_nocc)\n if n > 0])\n\n irrep_ncas = {}\n for k, v in cas_irrep_nocc.items():\n if isinstance(k, str):\n irrep_ncas[symm.irrep_name2id(mol.groupname, k)] = v\n else:\n irrep_ncas[k] = v\n\n ncas_rest = casscf.ncas - sum(irrep_ncas.values())\n if ncas_rest > 0:\n mask = numpy.ones(len(orbsym), dtype=bool)\n# remove core and specified active space\n for ir in irrep_ncas:\n mask[orbsym == ir] = False\n for ir, ncore in irrep_ncore.items():\n idx = numpy.where(orbsym == ir)[0]\n mask[idx[:ncore]] = False\n\n cas_rest = orbsym[mask][:ncas_rest]\n cas_rest = dict([(ir, numpy.count_nonzero(cas_rest==ir))\n for ir in set(cas_rest)])\n log.info('Given active space %s < casscf active space size %d',\n cas_irrep_nocc, casscf.ncas)\n log.info('Add %s to active space', cas_rest)\n irrep_ncas.update(cas_rest)\n elif ncas_rest < 0:\n raise ValueError('Given active space %s > casscf active space size %d'\n % (cas_irrep_nocc, casscf.ncas))\n\n caslst = []\n for ir, ncas in irrep_ncas.items():\n if ncas > 0:\n if ir in irrep_ncore:\n nc = irrep_ncore[ir]\n else:\n nc = 0\n no = nc + ncas\n idx = numpy.where(orbsym == ir)[0]\n caslst.extend(idx[nc:no])\n caslst = numpy.sort(numpy.asarray(caslst)) + base\n if len(caslst) < casscf.ncas:\n raise ValueError('Not enough orbitals found for core %s, cas %s' %\n (cas_irrep_ncore, cas_irrep_nocc))\n\n if log.verbose >= logger.INFO:\n log.info('ncore for each irreps %s',\n dict([(symm.irrep_id2name(mol.groupname, k), v)\n for k,v in irrep_ncore.items()]))\n log.info('ncas for each irreps %s',\n dict([(symm.irrep_id2name(mol.groupname, k), v)\n for k,v in irrep_ncas.items()]))\n log.info('(%d-based) caslst = %s', base, caslst)\n return caslst\n\ndef sort_mo_by_irrep(casscf, 
mo_coeff, cas_irrep_nocc,\n cas_irrep_ncore=None, s=None):\n '''Given number of active orbitals for each irrep, construct the mo initial\n guess for CASSCF\n\n Args:\n casscf : an :class:`CASSCF` or :class:`CASCI` object\n\n cas_irrep_nocc : list or dict\n Number of active orbitals for each irrep. It can be a dict, eg\n {'A1': 2, 'B2': 4} to indicate the active space size based on\n irrep names, or {0: 2, 3: 4} for irrep Id, or a list [2, 0, 0, 4]\n (identical to {0: 2, 3: 4}) in which the list index is served as\n the irrep Id.\n\n Kwargs:\n cas_irrep_ncore : list or dict\n Number of closed shells for each irrep. It can be a dict, eg\n {'A1': 6, 'B2': 4} to indicate the closed shells based on\n irrep names, or {0: 6, 3: 4} for irrep Id, or a list [6, 0, 0, 4]\n (identical to {0: 6, 3: 4}) in which the list index is served as\n the irrep Id. If cas_irrep_ncore is not given, the program\n will generate a guess based on the lowest :attr:`CASCI.ncore`\n orbitals.\n s : ndarray\n overlap matrix\n\n Returns:\n sorted orbitals, ordered as [c,..,c,a,..,a,v,..,v]\n\n Examples:\n\n >>> from pyscf import gto, scf, mcscf\n >>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvtz', symmetry=True, verbose=0)\n >>> mf = scf.RHF(mol)\n >>> mf.kernel()\n >>> mc = mcscf.CASSCF(mf, 12, 4)\n >>> mo = mc.sort_mo_by_irrep({'E1gx':4, 'E1gy':4, 'E1ux':2, 'E1uy':2})\n >>> # Same to mo = sort_mo_by_irrep(mc, mf.mo_coeff, {2: 4, 3: 4, 6: 2, 7: 2})\n >>> # Same to mo = sort_mo_by_irrep(mc, mf.mo_coeff, [0, 0, 4, 4, 0, 0, 2, 2])\n >>> mc.kernel(mo)[0]\n -108.162863845084\n '''\n caslst = caslst_by_irrep(casscf, mo_coeff, cas_irrep_nocc,\n cas_irrep_ncore, s, base=0)\n return sort_mo(casscf, mo_coeff, caslst, base=0)\n\n\ndef project_init_guess(casscf, init_mo, prev_mol=None):\n '''Project the given initial guess to the current CASSCF problem. The\n projected initial guess has two parts. The core orbitals are directly\n taken from the Hartree-Fock orbitals, and the active orbitals are\n projected from the given initial guess.\n\n Args:\n casscf : an :class:`CASSCF` or :class:`CASCI` object\n\n init_mo : ndarray or list of ndarray\n Initial guess orbitals which are not orth-normal for the current\n molecule. When the casscf is UHF-CASSCF, the init_mo needs to be\n a list of two ndarrays, for alpha and beta orbitals\n\n Kwargs:\n prev_mol : an instance of :class:`Mole`\n If given, the inital guess orbitals are associated to the geometry\n and basis of prev_mol. Otherwise, the orbitals are based of\n the geometry and basis of casscf.mol\n\n Returns:\n New orthogonal initial guess orbitals with the core taken from\n Hartree-Fock orbitals and projected active space from original initial\n guess orbitals\n\n Examples:\n\n .. 
code:: python\n\n import numpy\n from pyscf import gto, scf, mcscf\n mol = gto.Mole()\n mol.build(atom='H 0 0 0; F 0 0 0.8', basis='ccpvdz', verbose=0)\n mf = scf.RHF(mol)\n mf.scf()\n mc = mcscf.CASSCF(mf, 6, 6)\n mo = mcscf.sort_mo(mc, mf.mo_coeff, [3,4,5,6,8,9])\n print('E(0.8) = %.12f' % mc.kernel(mo)[0])\n init_mo = mc.mo_coeff\n for b in numpy.arange(1.0, 3., .2):\n mol.atom = [['H', (0, 0, 0)], ['F', (0, 0, b)]]\n mol.build(0, 0)\n mf = scf.RHF(mol)\n mf.scf()\n mc = mcscf.CASSCF(mf, 6, 6)\n mo = mcscf.project_init_guess(mc, init_mo)\n print('E(%2.1f) = %.12f' % (b, mc.kernel(mo)[0]))\n init_mo = mc.mo_coeff\n '''\n from pyscf import lo\n\n def project(mfmo, init_mo, ncore, s):\n s_init_mo = numpy.einsum('pi,pi->i', init_mo.conj(), s.dot(init_mo))\n if abs(s_init_mo - 1).max() < 1e-7 and mfmo.shape[1] == init_mo.shape[1]:\n # Initial guess orbitals are orthonormal\n return init_mo\n# TODO: test whether the canonicalized orbitals are better than the projected orbitals\n# Be careful that the ordering of the canonicalized orbitals may be very different\n# to the CASSCF orbitals.\n# else:\n# fock = casscf.get_fock(mc, init_mo, casscf.ci)\n# return casscf._scf.eig(fock, s)[1]\n\n nocc = ncore + casscf.ncas\n if ncore > 0:\n mo0core = init_mo[:,:ncore]\n s1 = reduce(numpy.dot, (mfmo.T, s, mo0core))\n s1core = reduce(numpy.dot, (mo0core.T, s, mo0core))\n coreocc = numpy.einsum('ij,ji->i', s1, lib.cho_solve(s1core, s1.T))\n coreidx = numpy.sort(numpy.argsort(-coreocc)[:ncore])\n logger.debug(casscf, 'Core indices %s', coreidx)\n logger.debug(casscf, 'Core components %s', coreocc[coreidx])\n # take HF core\n mocore = mfmo[:,coreidx]\n\n # take projected CAS space\n mocas = init_mo[:,ncore:nocc] \\\n - reduce(numpy.dot, (mocore, mocore.T, s, init_mo[:,ncore:nocc]))\n mocc = lo.orth.vec_lowdin(numpy.hstack((mocore, mocas)), s)\n else:\n mocc = lo.orth.vec_lowdin(init_mo[:,:nocc], s)\n\n # remove core and active space from rest\n if mocc.shape[1] < mfmo.shape[1]:\n if casscf.mol.symmetry:\n restorb = []\n orbsym = scf.hf_symm.get_orbsym(casscf.mol, mfmo, s)\n for ir in set(orbsym):\n mo_ir = mfmo[:,orbsym==ir]\n rest = mo_ir - reduce(numpy.dot, (mocc, mocc.T, s, mo_ir))\n e, u = numpy.linalg.eigh(reduce(numpy.dot, (rest.T, s, rest)))\n restorb.append(numpy.dot(rest, u[:,e>1e-7]))\n restorb = numpy.hstack(restorb)\n else:\n rest = mfmo - reduce(numpy.dot, (mocc, mocc.T, s, mfmo))\n e, u = numpy.linalg.eigh(reduce(numpy.dot, (rest.T, s, rest)))\n restorb = numpy.dot(rest, u[:,e>1e-7])\n mo = numpy.hstack((mocc, restorb))\n else:\n mo = mocc\n\n if casscf.verbose >= logger.DEBUG:\n s1 = reduce(numpy.dot, (mo[:,ncore:nocc].T, s, mfmo))\n idx = numpy.argwhere(abs(s1) > 0.4)\n for i,j in idx:\n logger.debug(casscf, 'Init guess <mo-CAS|mo-hf> %d %d %12.8f',\n ncore+i+1, j+1, s1[i,j])\n return mo\n\n ncore = casscf.ncore\n mfmo = casscf._scf.mo_coeff\n s = casscf._scf.get_ovlp()\n if prev_mol is None:\n if init_mo.shape[0] != mfmo.shape[0]:\n raise RuntimeError('Initial guess orbitals has wrong dimension')\n elif gto.same_mol(prev_mol, casscf.mol, cmp_basis=False):\n if isinstance(ncore, (int, numpy.integer)): # RHF\n init_mo = scf.addons.project_mo_nr2nr(prev_mol, init_mo, casscf.mol)\n else:\n init_mo = (scf.addons.project_mo_nr2nr(prev_mol, init_mo[0], casscf.mol),\n scf.addons.project_mo_nr2nr(prev_mol, init_mo[1], casscf.mol))\n elif gto.same_basis_set(prev_mol, casscf.mol):\n if isinstance(ncore, (int, numpy.integer)): # RHF\n fock = casscf.get_fock(init_mo, casscf.ci)\n return 
casscf._scf.eig(fock, s)[1]\n else:\n raise NotImplementedError('Project initial for UHF orbitals.')\n else:\n raise NotImplementedError('Project initial guess from different system.')\n\n# Be careful with the orbital projection. The projection may lead to bad\n# initial guess orbitals if the geometry is dramatically changed.\n if isinstance(ncore, (int, numpy.integer)):\n mo = project(mfmo, init_mo, ncore, s)\n else: # UHF-based CASSCF\n mo = (project(mfmo[0], init_mo[0], ncore[0], s),\n project(mfmo[1], init_mo[1], ncore[1], s))\n return mo\n\n# on AO representation\ndef make_rdm1(casscf, mo_coeff=None, ci=None, **kwargs):\n '''One-particle densit matrix in AO representation\n\n Args:\n casscf : an :class:`CASSCF` or :class:`CASCI` object\n\n Kwargs:\n ci : ndarray\n CAS space FCI coefficients. If not given, take casscf.ci.\n mo_coeff : ndarray\n Orbital coefficients. If not given, take casscf.mo_coeff.\n\n Examples:\n\n >>> import scipy.linalg\n >>> from pyscf import gto, scf, mcscf\n >>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='sto-3g', verbose=0)\n >>> mf = scf.RHF(mol)\n >>> res = mf.scf()\n >>> mc = mcscf.CASSCF(mf, 6, 6)\n >>> res = mc.kernel()\n >>> natocc = numpy.linalg.eigh(mcscf.make_rdm1(mc), mf.get_ovlp(), type=2)[0]\n >>> print(natocc)\n [ 0.0121563 0.0494735 0.0494735 1.95040395 1.95040395 1.98808879\n 2. 2. 2. 2. ]\n '''\n return casscf.make_rdm1(mo_coeff, ci, **kwargs)\n\n# make both alpha and beta density matrices\ndef make_rdm1s(casscf, mo_coeff=None, ci=None, **kwargs):\n '''Alpha and beta one-particle densit matrices in AO representation\n '''\n return casscf.make_rdm1s(mo_coeff, ci, **kwargs)\n\ndef _is_uhf_mo(mo_coeff):\n return not (isinstance(mo_coeff, numpy.ndarray) and mo_coeff.ndim == 2)\n\ndef _make_rdm12_on_mo(casdm1, casdm2, ncore, ncas, nmo):\n nocc = ncas + ncore\n dm1 = numpy.zeros((nmo,nmo))\n idx = numpy.arange(ncore)\n dm1[idx,idx] = 2\n dm1[ncore:nocc,ncore:nocc] = casdm1\n\n dm2 = numpy.zeros((nmo,nmo,nmo,nmo))\n dm2[ncore:nocc,ncore:nocc,ncore:nocc,ncore:nocc] = casdm2\n for i in range(ncore):\n for j in range(ncore):\n dm2[i,i,j,j] += 4\n dm2[i,j,j,i] += -2\n dm2[i,i,ncore:nocc,ncore:nocc] = dm2[ncore:nocc,ncore:nocc,i,i] =2*casdm1\n dm2[i,ncore:nocc,ncore:nocc,i] = dm2[ncore:nocc,i,i,ncore:nocc] = -casdm1\n return dm1, dm2\n\n# on AO representation\ndef make_rdm12(casscf, mo_coeff=None, ci=None):\n if ci is None: ci = casscf.ci\n if mo_coeff is None: mo_coeff = casscf.mo_coeff\n assert(not _is_uhf_mo(mo_coeff))\n nelecas = casscf.nelecas\n ncas = casscf.ncas\n ncore = casscf.ncore\n nmo = mo_coeff.shape[1]\n casdm1, casdm2 = casscf.fcisolver.make_rdm12(ci, ncas, nelecas)\n rdm1, rdm2 = _make_rdm12_on_mo(casdm1, casdm2, ncore, ncas, nmo)\n rdm1 = reduce(numpy.dot, (mo_coeff, rdm1, mo_coeff.T))\n rdm2 = numpy.dot(mo_coeff, rdm2.reshape(nmo,-1))\n rdm2 = numpy.dot(rdm2.reshape(-1,nmo), mo_coeff.T)\n rdm2 = rdm2.reshape(nmo,nmo,nmo,nmo).transpose(2,3,0,1)\n rdm2 = numpy.dot(mo_coeff, rdm2.reshape(nmo,-1))\n rdm2 = numpy.dot(rdm2.reshape(-1,nmo), mo_coeff.T)\n return rdm1, rdm2.reshape(nmo,nmo,nmo,nmo)\n\ndef get_fock(casscf, mo_coeff=None, ci=None):\n '''Generalized Fock matrix in AO representation\n '''\n if mo_coeff is None: mo_coeff = casscf.mo_coeff\n if _is_uhf_mo(mo_coeff):\n raise RuntimeError('TODO: UCAS general fock')\n else:\n return casscf.get_fock(mo_coeff, ci)\n\ndef cas_natorb(casscf, mo_coeff=None, ci=None, sort=False):\n '''Natrual orbitals in CAS space\n '''\n if mo_coeff is None: mo_coeff = casscf.mo_coeff\n if 
_is_uhf_mo(mo_coeff):\n raise RuntimeError('TODO: UCAS natrual orbitals')\n else:\n return casscf.cas_natorb(mo_coeff, ci, sort=sort)\n\ndef map2hf(casscf, mf_mo=None, base=BASE, tol=MAP2HF_TOL):\n '''The overlap between the CASSCF optimized orbitals and the canonical HF orbitals.\n '''\n if mf_mo is None: mf_mo = casscf._scf.mo_coeff\n s = casscf.mol.intor_symmetric('int1e_ovlp')\n s = reduce(numpy.dot, (casscf.mo_coeff.T, s, mf_mo))\n idx = numpy.argwhere(abs(s) > tol)\n for i,j in idx:\n logger.info(casscf, '<mo_coeff-mcscf|mo_coeff-hf> %d %d %12.8f',\n i+base, j+base, s[i,j])\n return idx\n\ndef spin_square(casscf, mo_coeff=None, ci=None, ovlp=None):\n '''Spin square of the UHF-CASSCF wavefunction\n\n Returns:\n A list of two floats. The first is the expectation value of S^2.\n The second is the corresponding 2S+1\n\n Examples:\n\n >>> from pyscf import gto, scf, mcscf\n >>> mol = gto.M(atom='O 0 0 0; O 0 0 1', basis='sto-3g', spin=2, verbose=0)\n >>> mf = scf.UHF(mol)\n >>> res = mf.scf()\n >>> mc = mcscf.CASSCF(mf, 4, 6)\n >>> res = mc.kernel()\n >>> print('S^2 = %.7f, 2S+1 = %.7f' % mcscf.spin_square(mc))\n S^2 = 3.9831589, 2S+1 = 4.1149284\n '''\n if ci is None: ci = casscf.ci\n ncore = casscf.ncore\n ncas = casscf.ncas\n nelecas = casscf.nelecas\n if isinstance(ncore, (int, numpy.integer)):\n return fci.spin_op.spin_square0(ci, ncas, nelecas)\n else:\n if mo_coeff is None: mo_coeff = casscf.mo_coeff\n if ovlp is None: ovlp = casscf._scf.get_ovlp()\n nocc = (ncore[0] + ncas, ncore[1] + ncas)\n mocas = (mo_coeff[0][:,ncore[0]:nocc[0]], mo_coeff[1][:,ncore[1]:nocc[1]])\n if isinstance(ci, (list, tuple, RANGE_TYPE)):\n sscas = numpy.array([fci.spin_op.spin_square(c, ncas, nelecas, mocas, ovlp)[0]\n for c in ci])\n else:\n sscas = fci.spin_op.spin_square(ci, ncas, nelecas, mocas, ovlp)[0]\n mocore = (mo_coeff[0][:,:ncore[0]], mo_coeff[1][:,:ncore[1]])\n sscore = casscf._scf.spin_square(mocore, ovlp)[0]\n logger.debug(casscf, 'S^2 of core %s S^2 of cas %s', sscore, sscas)\n ss = sscas+sscore\n s = numpy.sqrt(ss+.25) - .5\n return ss, s*2+1\n\n# A tag to label the derived FCI class\nclass StateAverageFCISolver:\n pass\n\ndef state_average_(casscf, weights=(0.5,0.5)):\n ''' State average over the energy. 
The energy funcitonal is\n E = w1<psi1|H|psi1> + w2<psi2|H|psi2> + ...\n\n Note we may need change the FCI solver to\n\n mc.fcisolver = fci.solver(mol, False)\n\n before calling state_average_(mc), to mix the singlet and triplet states\n '''\n assert(abs(sum(weights)-1) < 1e-3)\n fcibase_class = casscf.fcisolver.__class__\n if fcibase_class.__name__ == 'FakeCISolver':\n raise TypeError('mc.fcisolver is not base FCI solver\\n'\n 'state_average function cannot work with decorated '\n 'fcisolver %s.\\nYou can restore the base fcisolver '\n 'then call state_average function, eg\\n'\n ' mc.fcisolver = %s.%s(mc.mol)\\n'\n ' mc.state_average_()\\n' %\n (casscf.fcisolver, fcibase_class.__base__.__module__,\n fcibase_class.__base__.__name__))\n has_spin_square = getattr(casscf.fcisolver, 'spin_square', None)\n e_states = [None]\n\n class FakeCISolver(fcibase_class, StateAverageFCISolver):\n def __init__(self, mol=None):\n self.nroots = len(weights)\n def kernel(self, h1, h2, norb, nelec, ci0=None, **kwargs):\n# pass self to fcibase_class.kernel function because orbsym argument is stored in self\n# but undefined in fcibase object\n e, c = fcibase_class.kernel(self, h1, h2, norb, nelec, ci0,\n nroots=self.nroots, **kwargs)\n e_states[0] = e\n if casscf.verbose >= logger.DEBUG:\n if has_spin_square:\n for i, ei in enumerate(e):\n ss = fcibase_class.spin_square(self, c[i], norb, nelec)\n logger.debug(casscf, 'state %d E = %.15g S^2 = %.7f',\n i, ei, ss[0])\n else:\n for i, ei in enumerate(e):\n logger.debug(casscf, 'state %d E = %.15g', i, ei)\n return numpy.einsum('i,i->', e, weights), c\n def approx_kernel(self, h1, h2, norb, nelec, ci0=None, **kwargs):\n e, c = fcibase_class.kernel(self, h1, h2, norb, nelec, ci0,\n max_cycle=casscf.ci_response_space,\n nroots=self.nroots, **kwargs)\n return numpy.einsum('i,i->', e, weights), c\n def make_rdm1(self, ci0, norb, nelec, *args, **kwargs):\n dm1 = 0\n for i, wi in enumerate(weights):\n dm1 += wi * fcibase_class.make_rdm1(self, ci0[i], norb, nelec, *args, **kwargs)\n return dm1\n def make_rdm1s(self, ci0, norb, nelec, *args, **kwargs):\n dm1a, dm1b = 0, 0\n for i, wi in enumerate(weights):\n dm1s = fcibase_class.make_rdm1s(self, ci0[i], norb, nelec, *args, **kwargs)\n dm1a += wi * dm1s[0]\n dm1b += wi * dm1s[1]\n return dm1a, dm1b\n def make_rdm12(self, ci0, norb, nelec, *args, **kwargs):\n rdm1 = 0\n rdm2 = 0\n for i, wi in enumerate(weights):\n dm1, dm2 = fcibase_class.make_rdm12(self, ci0[i], norb, nelec, *args, **kwargs)\n rdm1 += wi * dm1\n rdm2 += wi * dm2\n return rdm1, rdm2\n\n if has_spin_square:\n def spin_square(self, ci0, norb, nelec, *args, **kwargs):\n ss = 0\n multip = 0\n for i, wi in enumerate(weights):\n res = fcibase_class.spin_square(self, ci0[i], norb, nelec, *args, **kwargs)\n ss += wi * res[0]\n multip += wi * res[1]\n return ss, multip\n\n fcisolver = FakeCISolver(casscf.mol)\n fcisolver.__dict__.update(casscf.fcisolver.__dict__)\n fcisolver.nroots = len(weights)\n casscf.fcisolver = fcisolver\n\n old_finalize = casscf._finalize\n def _finalize():\n old_finalize()\n casscf.e_tot = e_states[0]\n logger.note(casscf, 'CASCI energy for each state')\n if has_spin_square:\n ncas = casscf.ncas\n nelecas = casscf.nelecas\n for i, ei in enumerate(casscf.e_tot):\n ss = fcibase_class.spin_square(casscf.fcisolver, casscf.ci[i],\n ncas, nelecas)[0]\n logger.note(casscf, ' State %d weight %g E = %.15g S^2 = %.7f',\n i, weights[i], ei, ss)\n else:\n for i, ei in enumerate(casscf.e_tot):\n logger.note(casscf, ' State %d weight %g E = %.15g',\n i, 
weights[i], ei)\n return casscf\n casscf._finalize = _finalize\n return casscf\nstate_average = state_average_\n\n\ndef state_specific_(casscf, state=1):\n '''For excited state\n\n Kwargs:\n state : int\n 0 for ground state; 1 for first excited state.\n '''\n fcibase_class = casscf.fcisolver.__class__\n if fcibase_class.__name__ == 'FakeCISolver':\n raise TypeError('mc.fcisolver is not base FCI solver\\n'\n 'state_specific function cannot work with decorated '\n 'fcisolver %s.\\nYou can restore the base fcisolver '\n 'then call state_specific function, eg\\n'\n ' mc.fcisolver = %s.%s(mc.mol)\\n'\n ' mc.state_specific_()\\n' %\n (casscf.fcisolver, fcibase_class.__base__.__module__,\n fcibase_class.__base__.__name__))\n class FakeCISolver(fcibase_class, StateAverageFCISolver):\n def __init__(self):\n self.nroots = state+1\n self._civec = None\n def kernel(self, h1, h2, norb, nelec, ci0=None, **kwargs):\n if self._civec is not None:\n ci0 = self._civec\n e, c = fcibase_class.kernel(self, h1, h2, norb, nelec, ci0,\n nroots=self.nroots, **kwargs)\n if state == 0:\n e = [e]\n c = [c]\n self._civec = c\n if casscf.verbose >= logger.DEBUG:\n if getattr(fcibase_class, 'spin_square', None):\n ss = fcibase_class.spin_square(self, c[state], norb, nelec)\n logger.debug(casscf, 'state %d E = %.15g S^2 = %.7f',\n state, e[state], ss[0])\n else:\n logger.debug(casscf, 'state %d E = %.15g', state, e[state])\n return e[state], c[state]\n def approx_kernel(self, h1, h2, norb, nelec, ci0=None, **kwargs):\n if self._civec is not None:\n ci0 = self._civec\n e, c = fcibase_class.kernel(self, h1, h2, norb, nelec, ci0,\n max_cycle=casscf.ci_response_space,\n nroots=self.nroots, **kwargs)\n if state == 0:\n self._civec = [c]\n return e, c\n else:\n self._civec = c\n return e[state], c[state]\n\n fcisolver = FakeCISolver()\n fcisolver.__dict__.update(casscf.fcisolver.__dict__)\n fcisolver.nroots = state+1\n casscf.fcisolver = fcisolver\n return casscf\nstate_specific = state_specific_\n\ndef state_average_mix_(casscf, fcisolvers, weights=(0.5,0.5)):\n '''State-average CASSCF over multiple FCI solvers.\n '''\n fcibase_class = fcisolvers[0].__class__\n# if fcibase_class.__name__ == 'FakeCISolver':\n# logger.warn(casscf, 'casscf.fcisolver %s is a decorated FCI solver. '\n# 'state_average_mix_ function rolls back to the base solver %s',\n# fcibase_class, fcibase_class.__base__)\n# fcibase_class = fcibase_class.__base__\n nroots = sum(solver.nroots for solver in fcisolvers)\n assert(nroots == len(weights))\n has_spin_square = all(getattr(solver, 'spin_square', None)\n for solver in fcisolvers)\n has_large_ci = all(getattr(solver, 'large_ci', None)\n for solver in fcisolvers)\n e_states = [None]\n\n def collect(items):\n items = list(items)\n cols = [[item[i] for item in items] for i in range(len(items[0]))]\n return cols\n def loop_solver(solvers, ci0):\n p0 = 0\n for solver in solvers:\n if ci0 is None:\n yield solver, None\n elif solver.nroots == 1:\n yield solver, ci0[p0]\n else:\n yield solver, ci0[p0:p0+solver.nroots]\n p0 += solver.nroots\n def loop_civecs(solvers, ci0):\n p0 = 0\n for solver in solvers:\n for i in range(p0, p0+solver.nroots):\n yield solver, ci0[i]\n p0 += solver.nroots\n def get_nelec(solver, nelec):\n # FCISolver does not need this function. 
Some external solver may not\n # have the function to handle nelec and spin\n if solver.spin is not None:\n nelec = numpy.sum(nelec)\n nelec = (nelec+solver.spin)//2, (nelec-solver.spin)//2\n return nelec\n\n class FakeCISolver(fcibase_class, StateAverageFCISolver):\n def kernel(self, h1, h2, norb, nelec, ci0=None, verbose=0, **kwargs):\n# Note self.orbsym is initialized lazily in mc1step_symm.kernel function\n log = logger.new_logger(self, verbose)\n es = []\n cs = []\n for solver, c0 in loop_solver(fcisolvers, ci0):\n e, c = solver.kernel(h1, h2, norb, get_nelec(solver, nelec), c0,\n orbsym=self.orbsym, verbose=log, **kwargs)\n if solver.nroots == 1:\n es.append(e)\n cs.append(c)\n else:\n es.extend(e)\n cs.extend(c)\n e_states[0] = es\n\n if log.verbose >= logger.DEBUG:\n if has_spin_square:\n ss, multip = collect(solver.spin_square(c0, norb, get_nelec(solver, nelec))\n for solver, c0 in loop_civecs(fcisolvers, cs))\n for i, ei in enumerate(es):\n log.debug('state %d E = %.15g S^2 = %.7f', i, ei, ss[i])\n else:\n for i, ei in enumerate(es):\n log.debug('state %d E = %.15g', i, ei)\n return numpy.einsum('i,i', numpy.array(es), weights), cs\n\n def approx_kernel(self, h1, h2, norb, nelec, ci0=None, **kwargs):\n es = []\n cs = []\n for solver, c0 in loop_solver(fcisolvers, ci0):\n e, c = solver.kernel(h1, h2, norb, get_nelec(solver, nelec), c0,\n orbsym=self.orbsym, **kwargs)\n if solver.nroots == 1:\n es.append(e)\n cs.append(c)\n else:\n es.extend(e)\n cs.extend(c)\n return numpy.einsum('i,i->', es, weights), cs\n def make_rdm1(self, ci0, norb, nelec, **kwargs):\n dm1 = 0\n for i, (solver, c) in enumerate(loop_civecs(fcisolvers, ci0)):\n dm1 += weights[i]*solver.make_rdm1(c, norb, get_nelec(solver, nelec), **kwargs)\n return dm1\n def make_rdm1s(self, ci0, norb, nelec, **kwargs):\n dm1a, dm1b = 0, 0\n for i, (solver, c) in enumerate(loop_civecs(fcisolvers, ci0)):\n dm1s = solver.make_rdm1s(c, norb, get_nelec(solver, nelec), **kwargs)\n dm1a += weights[i] * dm1s[0]\n dm1b += weights[i] * dm1s[1]\n return dm1a, dm1b\n def make_rdm12(self, ci0, norb, nelec, **kwargs):\n rdm1 = 0\n rdm2 = 0\n for i, (solver, c) in enumerate(loop_civecs(fcisolvers, ci0)):\n dm1, dm2 = solver.make_rdm12(c, norb, get_nelec(solver, nelec), **kwargs)\n rdm1 += weights[i] * dm1\n rdm2 += weights[i] * dm2\n return rdm1, rdm2\n\n large_ci = None\n transform_ci_for_orbital_rotation = None\n\n if has_spin_square:\n def spin_square(self, ci0, norb, nelec):\n ss = 0\n multip = 0\n for i, (solver, c) in enumerate(loop_civecs(fcisolvers, ci0)):\n res = solver.spin_square(c, norb, nelec)\n ss += weights[i] * res[0]\n multip += weights[i] * res[1]\n return ss, multip\n\n fcisolver = FakeCISolver(casscf.mol)\n fcisolver.__dict__.update(casscf.fcisolver.__dict__)\n fcisolver.fcisolvers = fcisolvers\n casscf.fcisolver = fcisolver\n\n old_finalize = casscf._finalize\n def _finalize():\n old_finalize()\n casscf.e_tot = e_states[0]\n logger.note(casscf, 'CASCI energy for each state')\n if has_spin_square:\n ncas = casscf.ncas\n nelecas = casscf.nelecas\n ss, multip = collect(solver.spin_square(c0, ncas, get_nelec(solver, nelecas))\n for solver, c0 in loop_civecs(fcisolvers, casscf.ci))\n for i, ei in enumerate(casscf.e_tot):\n logger.note(casscf, ' State %d weight %g E = %.15g S^2 = %.7f',\n i, weights[i], ei, ss[i])\n else:\n for i, ei in enumerate(casscf.e_tot):\n logger.note(casscf, ' State %d weight %g E = %.15g',\n i, weights[i], ei)\n return casscf\n casscf._finalize = _finalize\n return casscf\nstate_average_mix = 
state_average_mix_\n\ndel(BASE, MAP2HF_TOL)\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import mcscf\n from pyscf.tools import ring\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None\n mol.atom = [['H', c] for c in ring.make(6, 1.2)]\n mol.basis = '6-31g'\n mol.build()\n\n m = scf.RHF(mol)\n ehf = m.scf()\n\n mc = mcscf.CASSCF(m, 6, 6)\n mc.verbose = 4\n emc, e_ci, fcivec, mo, mo_energy = mc.mc1step()\n print(ehf, emc, emc-ehf)\n print(emc - -3.272089958)\n\n rdm1 = make_rdm1(mc, mo, fcivec)\n rdm1, rdm2 = make_rdm12(mc, mo, fcivec)\n print(rdm1)\n\n mo1 = cas_natorb(mc)[0]\n numpy.set_printoptions(2)\n print(reduce(numpy.dot, (mo1[:,:6].T, mol.intor('int1e_ovlp_sph'),\n mo[:,:6])))\n\n# state average\n mol.atom = [\n ['O', ( 0., 0. , 0. )],\n ['H', ( 0., -0.757, 0.587)],\n ['H', ( 0., 0.757 , 0.587)],]\n mol.basis = '6-31g'\n mol.symmetry = 1\n mol.build()\n\n m = scf.RHF(mol)\n ehf = m.scf()\n\n\n mc = mcscf.CASSCF(m, 4, 4)\n mc.verbose = 4\n mc.fcisolver = fci.solver(mol, False) # to mix the singlet and triplet\n mc = state_average_(mc, (.64,.36))\n emc, e_ci, fcivec, mo, mo_energy = mc.mc1step()[:5]\n mc = mcscf.CASCI(m, 4, 4)\n emc = mc.casci(mo)[0]\n print(ehf, emc, emc-ehf)\n print(emc - -76.003352190262532)\n\n mc = mcscf.CASSCF(m, 4, 4)\n mc.verbose = 4\n mc = state_average_(mc, (.64,.36))\n emc, e_ci, fcivec, mo, mo_energy = mc.mc1step()[:5]\n mc = mcscf.CASCI(m, 4, 4)\n emc = mc.casci(mo)[0]\n print(ehf, emc, emc-ehf)\n print(emc - -75.982520334896776)\n\n\n mc = mcscf.CASSCF(m, 4, 4)\n mc.verbose = 4\n mc = state_specific_(mc, 2)\n emc = mc.kernel()[0]\n\n\n mc = mcscf.CASSCF(m, 4, 4)\n mc.verbose = 4\n hot_tuning_(mc, 'config1')\n mc.kernel()\n mc = None # release for gc\n" ]
[ [ "numpy.sqrt", "numpy.einsum", "numpy.tril_indices", "numpy.asarray", "numpy.ndarray.__array_wrap__", "numpy.ones", "numpy.append", "numpy.zeros_like", "numpy.argsort", "numpy.zeros", "numpy.where", "numpy.empty" ], [ "numpy.hstack", "numpy.sqrt", "numpy.reshape", "numpy.asarray", "numpy.stack", "numpy.result_type", "numpy.iscomplexobj", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.arange", "numpy.dot", "numpy.array", "numpy.allclose" ], [ "numpy.dot", "numpy.linalg.svd", "numpy.random.random", "numpy.einsum", "numpy.random.seed", "numpy.asarray", "numpy.eye", "numpy.linalg.det", "numpy.argsort", "numpy.zeros" ], [ "numpy.hstack", "numpy.dot", "numpy.sum", "numpy.sqrt", "numpy.einsum", "numpy.asarray", "numpy.arange", "numpy.set_printoptions", "numpy.ones", "numpy.count_nonzero", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MISStingting/NMTmodel
[ "970115d6f9fcd015d7daf3ad0e4844055e2af5d3", "970115d6f9fcd015d7daf3ad0e4844055e2af5d3" ]
[ "NMT/dataset.py", "tests/dataset_test.py" ]
[ "import tensorflow as tf\n\n\n# tf.enable_eager_execution()\n\n\nclass Dataset(object):\n\n def get_dataset(self, params, mode):\n if mode == tf.estimator.ModeKeys.TRAIN:\n features_path = params[\"train_features_file\"]\n labels_path = params[\"train_labels_file\"]\n elif mode == tf.estimator.ModeKeys.EVAL:\n features_path = params[\"eval_features_file\"]\n labels_path = params[\"eval_labels_file\"]\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n features_path = params[\"test_features_file\"]\n labels_path = params[\"test_labels_file\"]\n else:\n raise ValueError(\"wrong mode!!!\")\n\n features_dataset, labels_dataset = self._load_dataset(features_path, labels_path, mode)\n if mode == tf.estimator.ModeKeys.PREDICT:\n dataset = features_dataset.map(lambda x: tf.string_split([x]).values)\n dataset = dataset.shuffle(buffer_size=params[\"buffer_size\"],\n reshuffle_each_iteration=params[\"reshuffle_each_iteration\"])\n dataset = dataset.prefetch(buffer_size=params[\"buffer_size\"])\n dataset = dataset.map(lambda src: (src, tf.size(src)))\n dataset = dataset.padded_batch(batch_size=params[\"batch_size\"],\n padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])),\n padding_values=(tf.constant(\"<blank>\"), 0))\n iterator = dataset.make_one_shot_iterator()\n src, src_len = iterator.get_next()\n features = {\n \"input\": src,\n \"input_length\": src_len\n }\n labels = None\n else:\n dataset = tf.data.Dataset.zip((features_dataset, labels_dataset))\n dataset = dataset.map(lambda x, y: (tf.string_split([x]).values, tf.string_split([y]).values))\n dataset = dataset.repeat(params[\"repeat\"]).shuffle(buffer_size=params[\"buffer_size\"],\n reshuffle_each_iteration=params[\n \"reshuffle_each_iteration\"])\n dataset = dataset.prefetch(buffer_size=params[\"buffer_size\"])\n if params[\"src_max_len\"] > 0:\n dataset = dataset.map(\n lambda src, tgt: (src[:params[\"src_max_len\"]], tgt))\n if params[\"tgt_max_len\"] > 0:\n dataset = dataset.map(\n lambda src, tgt: (src, tgt[:params[\"tgt_max_len\"]]))\n dataset = dataset.map(\n lambda src, tgt: (src,\n tf.concat(([\"<s>\"], tgt), 0),\n tf.concat((tgt, [\"</s>\"]), 0)),\n num_parallel_calls=params[\"num_parallel_calls\"])\n dataset = dataset.map(lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_out)))\n dataset = dataset.padded_batch(batch_size=params[\"batch_size\"],\n padded_shapes=(\n tf.TensorShape([None]),\n tf.TensorShape([None]),\n tf.TensorShape([None]),\n tf.TensorShape([]),\n tf.TensorShape([])),\n padding_values=(\n tf.constant(\"<blank>\", dtype=tf.string),\n tf.constant(\"<s>\", dtype=tf.string),\n tf.constant(\"</s>\", dtype=tf.string),\n 0,\n 0))\n iterator = dataset.make_one_shot_iterator()\n src, tgt_in, tgt_out, input_length, output_length = iterator.get_next()\n features = {\n \"input\": src,\n \"input_length\": input_length\n }\n labels = {\n \"output_in\": tgt_in,\n \"output_out\": tgt_out,\n \"output_length\": output_length\n }\n return features, labels\n\n @staticmethod\n def _load_dataset(features_path, labels_path, mode):\n ''' 从文件读取dataset\n :param mode:\n :return:\n '''\n if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n features_dataset = tf.data.TextLineDataset(filenames=features_path)\n labels_dataset = tf.data.TextLineDataset(filenames=labels_path)\n\n return features_dataset, labels_dataset\n elif mode == tf.estimator.ModeKeys.PREDICT:\n features_dataset = tf.data.TextLineDataset(filenames=features_path)\n return features_dataset, None\n\n\ndata_util = 
Dataset()\n", "import os\nimport yaml\nimport tensorflow as tf\nfrom NMTmodel.NMT.dataset import data_util\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\npar_dir = os.path.dirname(cur_dir)\n\n\nclass DatasetTest(tf.test.TestCase):\n def setUp(self):\n self.config_file = os.path.join(par_dir, \"config.yml\")\n\n def test_dataset(self):\n with tf.gfile.GFile(self.config_file, \"rb\") as f:\n params = yaml.load(stream=f.read(), Loader=yaml.FullLoader)\n data_util.get_dataset(params, mode=tf.estimator.ModeKeys.PREDICT)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.TensorShape", "tensorflow.constant", "tensorflow.concat", "tensorflow.string_split", "tensorflow.data.Dataset.zip", "tensorflow.data.TextLineDataset", "tensorflow.size" ], [ "tensorflow.gfile.GFile", "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EdisonLeeeee/GraphGallery
[ "4eec9c5136bda14809bd22584b26cc346cdb633b", "4eec9c5136bda14809bd22584b26cc346cdb633b", "4eec9c5136bda14809bd22584b26cc346cdb633b", "4eec9c5136bda14809bd22584b26cc346cdb633b", "4eec9c5136bda14809bd22584b26cc346cdb633b", "4eec9c5136bda14809bd22584b26cc346cdb633b" ]
[ "graphgallery/nn/layers/tensorflow/dropout/dropout.py", "examples/Graph_Adversarial_Learning/Untargeted/Evasion/PyTorch/Metattack.py", "graphgallery/attack/untargeted/tensorflow/metattack.py", "graphgallery/nn/models/pytorch/parinorm/sgc_pn.py", "graphgallery/attack/untargeted/pytorch/pgd.py", "graphgallery/functional/tensor/tensorflow/tensor.py" ]
[ "import tensorflow as tf\r\nimport tensorflow.keras.backend as K\r\nfrom tensorflow.keras.layers import Layer, Dropout\r\n\r\n\r\nclass SparseDropout(Layer):\r\n def __init__(self, p=0.5):\r\n super().__init__()\r\n self.p = p\r\n\r\n def call(self, x, training=None):\r\n if training is None:\r\n training = K.learning_phase()\r\n\r\n if self.p and training:\r\n values = tf.nn.dropout(x.values, self.p)\r\n return tf.SparseTensor(x.indices, values, x.dense_shape)\r\n return x\r\n\r\n\r\nclass MixedDropout(Layer):\r\n def __init__(self, p=0.5):\r\n super().__init__()\r\n self.dense_dropout = Dropout(p)\r\n self.sparse_dropout = SparseDropout(p)\r\n\r\n def call(self, x):\r\n if K.is_sparse(x):\r\n return self.sparse_dropout(x)\r\n else:\r\n return self.dense_dropout(x)\r\n", "import numpy as np\r\nimport graphgallery as gg\r\nfrom graphgallery import functional as gf\r\nfrom graphgallery.datasets import NPZDataset\r\n\r\ndata = NPZDataset('cora', root=\"~/GraphData/datasets/\", verbose=False, transform=\"standardize\")\r\ngraph = data.graph\r\nsplits = data.split_nodes(random_state=15)\r\n\r\n# use PyTorch backend\r\ngg.set_backend(\"torch\")\r\n\r\n# GPU is recommended\r\ndevice = \"gpu\"\r\n\r\n################### Surrogate model ############################\r\ntrainer = gg.gallery.nodeclas.GCN(device=device, seed=None).setup_graph(graph).build()\r\ntrainer.fit(splits.train_nodes,\r\n splits.val_nodes,\r\n verbose=1,\r\n epochs=200)\r\n\r\n################### Attacker model ############################\r\nunlabeled_nodes = np.hstack([splits.val_nodes, splits.test_nodes])\r\nself_training_labels = trainer.predict(unlabeled_nodes).argmax(1)\r\n\r\nattacker = gg.attack.untargeted.Metattack(graph, device=device, seed=123).process(splits.train_nodes,\r\n unlabeled_nodes,\r\n self_training_labels,\r\n lr=0.1, # cora lr=0.1, citeseer lr=0.01 reaches the best performance\r\n lambda_=0.,\r\n use_relu=False)\r\nattacker.attack(0.05)\r\n################### Victim model ############################\r\n# This is a white-box attack\r\n# Before attack\r\noriginal_result = trainer.evaluate(splits.test_nodes)\r\n\r\n# After attack\r\n# reprocess after the graph has changed\r\ntrainer.setup_graph(attacker.g) # important!\r\nperturbed_result = trainer.evaluate(splits.test_nodes)\r\n\r\n################### Results ############################\r\nprint(f\"original prediction {original_result.accuracy:.2%}\")\r\nprint(f\"perturbed prediction {perturbed_result.accuracy:.2%}\")\r\nprint(\r\n f\"The accuracy has gone down {original_result.accuracy-perturbed_result.accuracy:.2%}\"\r\n)\r\n", "import warnings\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.initializers import glorot_uniform, zeros\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.activations import softmax, relu\r\nfrom tensorflow.keras.losses import SparseCategoricalCrossentropy\r\n\r\nimport graphgallery as gg\r\nfrom graphgallery import functional as gf\r\nfrom graphgallery.utils import tqdm\r\n# from graphadv.utils.graph_utils import likelihood_ratio_filter\r\nfrom graphgallery.attack.untargeted import TensorFlow\r\nfrom ..untargeted_attacker import UntargetedAttacker\r\n\r\n\r\nclass BaseMeta(UntargetedAttacker):\r\n \"\"\"Base model for Mettack.\"\"\"\r\n # mettack can also conduct feature attack\r\n _allow_feature_attack = True\r\n\r\n def process(self,\r\n train_nodes,\r\n unlabeled_nodes,\r\n self_training_labels,\r\n hids,\r\n use_relu,\r\n reset=True):\r\n\r\n self.ll_ratio = None\r\n\r\n 
with tf.device(self.device):\r\n self.train_nodes = gf.astensor(train_nodes, dtype=self.intx)\r\n self.unlabeled_nodes = gf.astensor(unlabeled_nodes, dtype=self.intx)\r\n self.labels_train = gf.astensor(self.graph.node_label[train_nodes], dtype=self.intx)\r\n self.self_training_labels = gf.astensor(self_training_labels, dtype=self.intx)\r\n self.adj_tensor = gf.astensor(self.graph.adj_matrix.A, dtype=self.floatx)\r\n self.x_tensor = gf.astensor(self.graph.node_attr, dtype=self.floatx)\r\n self.build(hids=hids)\r\n self.use_relu = use_relu\r\n self.loss_fn = SparseCategoricalCrossentropy(from_logits=True)\r\n\r\n self.adj_changes = tf.Variable(tf.zeros_like(self.adj_tensor))\r\n self.x_changes = tf.Variable(tf.zeros_like(self.x_tensor))\r\n\r\n if reset:\r\n self.reset()\r\n return self\r\n\r\n def reset(self):\r\n super().reset()\r\n self.adj_flips = []\r\n self.nattr_flips = []\r\n\r\n with tf.device(self.device):\r\n self.adj_changes.assign(tf.zeros_like(self.adj_tensor))\r\n self.x_changes.assign(tf.zeros_like(self.x_tensor))\r\n return self\r\n\r\n def filter_potential_singletons(self, modified_adj):\r\n \"\"\"\r\n Computes a mask for entries potentially leading to singleton nodes, i.e. one of the two nodes corresponding to\r\n the entry have degree 1 and there is an edge between the two nodes.\r\n Returns\r\n -------\r\n tf.Tensor shape [N, N], float with ones everywhere except the entries of potential singleton nodes,\r\n where the returned tensor has value 0.\r\n \"\"\"\r\n N = self.num_nodes\r\n degrees = tf.reduce_sum(modified_adj, axis=1)\r\n degree_one = tf.equal(degrees, 1)\r\n resh = tf.reshape(tf.tile(degree_one, [N]), [N, N])\r\n l_and = tf.logical_and(resh, tf.equal(modified_adj, 1))\r\n logical_and_symmetric = tf.logical_or(l_and, tf.transpose(l_and))\r\n flat_mask = 1. - tf.cast(logical_and_symmetric, self.floatx)\r\n return flat_mask\r\n\r\n# def log_likelihood_constraint(self, adj, modified_adj, ll_cutoff):\r\n# \"\"\"\r\n# Computes a mask for entries that, if the edge corresponding to the entry is added/removed, would lead to the\r\n# log likelihood constraint to be violated.\r\n\r\n# \"\"\"\r\n# t_d_min = tf.constant(2., dtype=self.floatx)\r\n# t_possible_edges = tf.constant(np.array(\r\n# np.triu(np.ones([self.num_nodes, self.num_nodes]),\r\n# k=1).nonzero()).T,\r\n# dtype=self.intx)\r\n# allowed_mask, current_ratio = likelihood_ratio_filter(\r\n# t_possible_edges, modified_adj, adj, t_d_min, ll_cutoff)\r\n\r\n# return allowed_mask, current_ratio\r\n\r\n @tf.function\r\n def get_perturbed_adj(self, adj, adj_changes):\r\n adj_changes_square = adj_changes - tf.linalg.band_part(adj_changes, 0, 0)\r\n adj_changes_sym = adj_changes_square + tf.transpose(adj_changes_square)\r\n clipped_adj_changes = self.clip(adj_changes_sym)\r\n return adj + clipped_adj_changes\r\n\r\n @tf.function\r\n def get_perturbed_x(self, x, x_changes):\r\n return x + self.clip(x_changes)\r\n\r\n def forward(self, x, adj):\r\n h = x\r\n for w in self.weights[:-1]:\r\n h = adj @ h @ w\r\n if self.use_relu:\r\n h = relu(h)\r\n\r\n return adj @ h @ self.weights[-1]\r\n\r\n def structure_score(self,\r\n modified_adj,\r\n adj_grad,\r\n ll_constraint=None,\r\n ll_cutoff=None):\r\n adj_meta_grad = adj_grad * (-2. 
* modified_adj + 1.)\r\n # Make sure that the minimum entry is 0.\r\n adj_meta_grad -= tf.reduce_min(adj_meta_grad)\r\n\r\n# if not self.allow_singleton:\r\n# # Set entries to 0 that could lead to singleton nodes.\r\n# singleton_mask = self.filter_potential_singletons(modified_adj)\r\n# adj_meta_grad *= singleton_mask\r\n\r\n # if ll_constraint:\r\n # allowed_mask, self.ll_ratio = self.log_likelihood_constraint(\r\n # modified_adj, self.adj_tensor, ll_cutoff)\r\n # adj_meta_grad = adj_meta_grad * allowed_mask\r\n\r\n return tf.reshape(adj_meta_grad, [-1])\r\n\r\n def feature_score(self, modified_nx, x_grad):\r\n x_meta_grad = x_grad * (-2. * modified_nx + 1.)\r\n x_meta_grad -= tf.reduce_min(x_meta_grad)\r\n return tf.reshape(x_meta_grad, [-1])\r\n\r\n def clip(self, matrix):\r\n clipped_matrix = tf.clip_by_value(matrix, -1., 1.)\r\n return clipped_matrix\r\n\r\n\r\[email protected]()\r\nclass Metattack(BaseMeta):\r\n def process(self,\r\n train_nodes,\r\n unlabeled_nodes,\r\n self_training_labels,\r\n hids=[16],\r\n lr=0.1,\r\n epochs=100,\r\n momentum=0.9,\r\n lambda_=0.,\r\n use_relu=True,\r\n reset=True):\r\n\r\n self.lr = lr\r\n self.epochs = epochs\r\n self.momentum = momentum\r\n self.lambda_ = lambda_\r\n\r\n if lambda_ not in (0., 0.5, 1.):\r\n raise ValueError(\r\n 'Invalid value of `lambda_`, allowed values [0: (meta-self), 1: (meta-train), 0.5: (meta-both)].'\r\n )\r\n return super().process(train_nodes=train_nodes,\r\n unlabeled_nodes=unlabeled_nodes,\r\n self_training_labels=self_training_labels,\r\n hids=hids,\r\n use_relu=use_relu,\r\n reset=reset)\r\n\r\n def build(self, hids):\r\n hids = gf.repeat(hids)\r\n weights, w_velocities = [], []\r\n zeros_initializer = zeros()\r\n\r\n pre_hid = self.num_attrs\r\n for hid in hids + [self.num_classes]:\r\n shape = (pre_hid, hid)\r\n # use zeros_initializer temporary to save time\r\n weight = tf.Variable(zeros_initializer(shape=shape, dtype=self.floatx))\r\n w_velocity = tf.Variable(zeros_initializer(shape=shape, dtype=self.floatx))\r\n\r\n weights.append(weight)\r\n w_velocities.append(w_velocity)\r\n\r\n pre_hid = hid\r\n\r\n self.weights, self.w_velocities = weights, w_velocities\r\n\r\n def initialize(self):\r\n w_initializer = glorot_uniform()\r\n zeros_initializer = zeros()\r\n\r\n for w, wv in zip(self.weights, self.w_velocities):\r\n w.assign(w_initializer(w.shape, dtype=self.floatx))\r\n wv.assign(zeros_initializer(wv.shape, dtype=self.floatx))\r\n\r\n @tf.function\r\n def train_step(self, x, adj, index, labels):\r\n with tf.GradientTape() as tape:\r\n output = self.forward(x, adj)\r\n logit = tf.gather(output, index)\r\n loss = self.loss_fn(labels, logit)\r\n\r\n weight_grads = tape.gradient(loss, self.weights)\r\n return weight_grads\r\n\r\n def inner_train(self, x, adj):\r\n\r\n self.initialize()\r\n adj_norm = gf.normalize_adj_tensor(adj)\r\n\r\n for it in range(self.epochs):\r\n weight_grads = self.train_step(x, adj_norm, self.train_nodes, self.labels_train)\r\n\r\n for v, g in zip(self.w_velocities, weight_grads):\r\n v.assign(self.momentum * v + g)\r\n\r\n for w, v in zip(self.weights, self.w_velocities):\r\n w.assign_sub(self.lr * v)\r\n\r\n @tf.function\r\n def meta_grad(self):\r\n\r\n modified_adj, modified_nx = self.adj_tensor, self.x_tensor\r\n adj_tensor, x_tensor = self.adj_tensor, self.x_tensor\r\n persistent = self.structure_attack and self.feature_attack\r\n\r\n with tf.GradientTape(persistent=persistent) as tape:\r\n if self.structure_attack:\r\n modified_adj = self.get_perturbed_adj(adj_tensor, 
self.adj_changes)\r\n\r\n if self.feature_attack:\r\n modified_nx = self.get_perturbed_x(x_tensor, self.x_changes)\r\n\r\n adj_norm = gf.normalize_adj_tensor(modified_adj)\r\n output = self.forward(modified_nx, adj_norm)\r\n logit_labeled = tf.gather(output, self.train_nodes)\r\n logit_unlabeled = tf.gather(output, self.unlabeled_nodes)\r\n\r\n loss_labeled = self.loss_fn(self.labels_train, logit_labeled)\r\n loss_unlabeled = self.loss_fn(self.self_training_labels, logit_unlabeled)\r\n\r\n attack_loss = self.lambda_ * loss_labeled + (1 - self.lambda_) * loss_unlabeled\r\n\r\n adj_grad, x_grad = None, None\r\n\r\n if self.feature_attack:\r\n x_grad = tape.gradient(attack_loss, self.x_changes)\r\n\r\n if self.structure_attack:\r\n adj_grad = tape.gradient(attack_loss, self.adj_changes)\r\n\r\n return x_grad, adj_grad\r\n\r\n def attack(self,\r\n num_budgets=0.05,\r\n structure_attack=True,\r\n feature_attack=False,\r\n ll_constraint=False,\r\n ll_cutoff=0.004,\r\n disable=False):\r\n super().attack(num_budgets, structure_attack, feature_attack)\r\n\r\n if ll_constraint:\r\n raise NotImplementedError(\r\n \"`log_likelihood_constraint` has not been well tested.\"\r\n \" Please set `ll_constraint=False` to achieve a better performance.\"\r\n )\r\n\r\n if feature_attack and not self.graph.is_binary():\r\n raise ValueError(\r\n \"Attacks on the node features are currently only supported for binary attributes.\"\r\n )\r\n\r\n with tf.device(self.device):\r\n modified_adj, modified_nx = self.adj_tensor, self.x_tensor\r\n adj_tensor, x_tensor = self.adj_tensor, self.x_tensor\r\n adj_changes, x_changes = self.adj_changes, self.x_changes\r\n adj_flips, nattr_flips = self.adj_flips, self.nattr_flips\r\n\r\n self.inner_train(modified_nx, modified_adj)\r\n \r\n for it in tqdm(range(self.num_budgets),\r\n desc='Peturbing Graph',\r\n disable=disable):\r\n\r\n if structure_attack:\r\n modified_adj = self.get_perturbed_adj(adj_tensor, adj_changes)\r\n\r\n if feature_attack:\r\n modified_nx = self.get_perturbed_x(x_tensor,x_changes)\r\n\r\n self.inner_train(modified_nx, modified_adj)\r\n\r\n x_grad, adj_grad = self.meta_grad()\r\n\r\n adj_meta_score = tf.constant(0.0)\r\n x_meta_score = tf.constant(0.0)\r\n\r\n if structure_attack:\r\n adj_meta_score = self.structure_score(modified_adj, adj_grad, ll_constraint, ll_cutoff)\r\n\r\n if feature_attack:\r\n x_meta_score = self.feature_score(modified_nx, x_grad)\r\n\r\n if tf.reduce_max(adj_meta_score) >= tf.reduce_max(x_meta_score) and structure_attack:\r\n adj_meta_argmax = tf.argmax(adj_meta_score)\r\n row, col = divmod(adj_meta_argmax.numpy(), self.num_nodes)\r\n adj_changes[row, col].assign(-2. * modified_adj[row, col] + 1.)\r\n adj_changes[col, row].assign(-2. * modified_adj[col, row] + 1.)\r\n adj_flips.append((row, col))\r\n elif tf.reduce_max(adj_meta_score) < tf.reduce_max(x_meta_score) and feature_attack:\r\n x_meta_argmax = tf.argmax(x_meta_score)\r\n row, col = divmod(x_meta_argmax.numpy(), self.num_attrs)\r\n x_changes[row, col].assign(-2 * modified_nx[row, col] + 1)\r\n nattr_flips.append((row, col))\r\n else:\r\n warnings.warn(f\"Do nothing at iter {it}. 
adj_meta_score={adj_meta_score}, x_meta_score={x_meta_score}\",\r\n UserWarning)\r\n\r\n\r\[email protected]()\r\nclass MetaApprox(BaseMeta):\r\n def process(self,\r\n train_nodes,\r\n unlabeled_nodes,\r\n self_training_labels,\r\n hids=[16],\r\n lr=0.1,\r\n epochs=100,\r\n lambda_=0.,\r\n use_relu=True):\r\n\r\n self.lr = lr\r\n self.epochs = epochs\r\n self.lambda_ = lambda_\r\n\r\n if lambda_ not in (0., 0.5, 1.):\r\n raise ValueError(\r\n 'Invalid value of `lambda_`, allowed values [0: (meta-self), 1: (meta-train), 0.5: (meta-both)].'\r\n )\r\n return super().process(train_nodes=train_nodes,\r\n unlabeled_nodes=unlabeled_nodes,\r\n self_training_labels=self_training_labels,\r\n hids=hids,\r\n use_relu=use_relu)\r\n\r\n def build(self, hids):\r\n hids = gf.repeat(hids)\r\n weights = []\r\n zeros_initializer = zeros()\r\n\r\n pre_hid = self.num_attrs\r\n for hid in hids + [self.num_classes]:\r\n shape = (pre_hid, hid)\r\n # use zeros_initializer temporary to save time\r\n weight = tf.Variable(zeros_initializer(shape=shape, dtype=self.floatx))\r\n weights.append(weight)\r\n pre_hid = hid\r\n\r\n self.weights = weights\r\n self.adj_grad_sum = tf.Variable(tf.zeros_like(self.adj_tensor))\r\n self.x_grad_sum = tf.Variable(tf.zeros_like(self.x_tensor))\r\n self.optimizer = Adam(self.lr, epsilon=1e-8)\r\n\r\n def initialize(self):\r\n\r\n w_initializer = glorot_uniform()\r\n zeros_initializer = zeros()\r\n\r\n for w in self.weights:\r\n w.assign(w_initializer(w.shape, dtype=self.floatx))\r\n\r\n if self.structure_attack:\r\n self.adj_grad_sum.assign(zeros_initializer(self.adj_grad_sum.shape, dtype=self.floatx))\r\n\r\n if self.feature_attack:\r\n self.x_grad_sum.assign(zeros_initializer(self.x_grad_sum.shape, dtype=self.floatx))\r\n\r\n # reset optimizer\r\n for var in self.optimizer.variables():\r\n var.assign(tf.zeros_like(var))\r\n\r\n @tf.function\r\n def meta_grad(self):\r\n self.initialize()\r\n\r\n modified_adj, modified_nx = self.adj_tensor, self.x_tensor\r\n adj_tensor, x_tensor = self.adj_tensor, self.x_tensor\r\n adj_grad_sum, x_grad_sum = self.adj_grad_sum, self.x_grad_sum\r\n optimizer = self.optimizer\r\n\r\n for it in tf.range(self.epochs):\r\n\r\n with tf.GradientTape(persistent=True) as tape:\r\n if self.structure_attack:\r\n modified_adj = self.get_perturbed_adj(adj_tensor, self.adj_changes)\r\n\r\n if self.feature_attack:\r\n modified_nx = self.get_perturbed_x(x_tensor, self.x_changes)\r\n\r\n adj_norm = gf.normalize_adj_tensor(modified_adj)\r\n output = self.forward(modified_nx, adj_norm)\r\n logit_labeled = tf.gather(output, self.train_nodes)\r\n logit_unlabeled = tf.gather(output, self.unlabeled_nodes)\r\n\r\n loss_labeled = self.loss_fn(self.labels_train, logit_labeled)\r\n loss_unlabeled = self.loss_fn(self.self_training_labels, logit_unlabeled)\r\n\r\n attack_loss = self.lambda_ * loss_labeled + (1 - self.lambda_) * loss_unlabeled\r\n\r\n adj_grad, x_grad = None, None\r\n\r\n gradients = tape.gradient(loss_labeled, self.weights)\r\n optimizer.apply_gradients(zip(gradients, self.weights))\r\n\r\n if self.structure_attack:\r\n adj_grad = tape.gradient(attack_loss, self.adj_changes)\r\n adj_grad_sum.assign_add(adj_grad)\r\n\r\n if self.feature_attack:\r\n x_grad = tape.gradient(attack_loss, self.x_changes)\r\n x_grad_sum.assign_add(x_grad)\r\n\r\n del tape\r\n\r\n return x_grad_sum, adj_grad_sum\r\n\r\n def attack(self,\r\n num_budgets=0.05,\r\n structure_attack=True,\r\n feature_attack=False,\r\n ll_constraint=False,\r\n ll_cutoff=0.004,\r\n disable=False):\r\n\r\n 
super().attack(num_budgets, structure_attack, feature_attack)\r\n\r\n if ll_constraint:\r\n raise NotImplementedError(\r\n \"`log_likelihood_constraint` has not been well tested.\"\r\n \" Please set `ll_constraint=False` to achieve a better performance.\"\r\n )\r\n\r\n if feature_attack and not self.graph.is_binary():\r\n raise ValueError(\r\n \"Attacks on the node features are currently only supported for binary attributes.\"\r\n )\r\n\r\n with tf.device(self.device):\r\n modified_adj, modified_nx = self.adj_tensor, self.x_tensor\r\n adj_tensor, x_tensor = self.adj_tensor, self.x_tensor\r\n adj_changes, x_changes = self.adj_changes, self.x_changes\r\n adj_flips, nattr_flips = self.adj_flips, self.nattr_flips\r\n\r\n for it in tqdm(range(self.num_budgets),\r\n desc='Peturbing Graph',\r\n disable=disable):\r\n\r\n x_grad, adj_grad = self.meta_grad()\r\n\r\n adj_meta_score = tf.constant(0.0)\r\n x_meta_score = tf.constant(0.0)\r\n\r\n if structure_attack:\r\n modified_adj = self.get_perturbed_adj(adj_tensor, adj_changes)\r\n adj_meta_score = self.structure_score(modified_adj, adj_grad, ll_constraint, ll_cutoff)\r\n\r\n if feature_attack:\r\n modified_nx = self.get_perturbed_x(x_tensor, x_changes)\r\n x_meta_score = self.feature_score(modified_nx, feature_grad)\r\n\r\n if tf.reduce_max(adj_meta_score) >= tf.reduce_max(x_meta_score) and structure_attack:\r\n adj_meta_argmax = tf.argmax(adj_meta_score)\r\n row, col = divmod(adj_meta_argmax.numpy(), self.num_nodes)\r\n adj_changes[row, col].assign(-2. * modified_adj[row, col] + 1.)\r\n adj_changes[col, row].assign(-2. * modified_adj[col, row] + 1.)\r\n adj_flips.append((row, col))\r\n elif tf.reduce_max(adj_meta_score) < tf.reduce_max(x_meta_score) and feature_attack:\r\n x_meta_argmax = tf.argmax(x_meta_score)\r\n row, col = divmod(x_meta_argmax.numpy(), self.num_attrs)\r\n x_changes[row, col].assign(-2 * modified_nx[row, col] + 1)\r\n nattr_flips.append((row, col))\r\n else:\r\n warnings.warn(f\"Do nothing at iter {it}. 
adj_meta_score={adj_meta_score}, x_meta_score={x_meta_score}\",\r\n UserWarning)\r\n return self\r\n", "import torch.nn as nn\r\nfrom torch import optim\r\nfrom graphgallery.nn.models import TorchEngine\r\nfrom graphgallery.nn.layers.pytorch import PairNorm\r\nfrom graphgallery.nn.metrics.pytorch import Accuracy\r\n\r\n\r\nclass SGC_PN(TorchEngine):\r\n \"\"\"PairNorm: Tackling Oversmoothing in GNNs\r\n <https://openreview.net/forum?id=rkecl1rtwB>\r\n ICLR 2020\"\"\"\r\n\r\n def __init__(self,\r\n in_features,\r\n out_features,\r\n hids=[],\r\n acts=[],\r\n K=2,\r\n norm_mode=None,\r\n norm_scale=10,\r\n dropout=0.6,\r\n weight_decay=5e-4,\r\n lr=0.005,\r\n bias=False):\r\n\r\n super().__init__()\r\n assert not hids and not acts\r\n self.linear = nn.Linear(in_features, out_features, bias=bias)\r\n self.norm = PairNorm(norm_mode, norm_scale)\r\n self.dropout = nn.Dropout(p=dropout)\r\n self.K = K\r\n self.compile(loss=nn.CrossEntropyLoss(),\r\n optimizer=optim.Adam(self.parameters(), lr=0.01),\r\n metrics=[Accuracy()])\r\n\r\n def forward(self, x, adj):\r\n x = self.norm(x)\r\n for _ in range(self.K):\r\n x = adj.mm(x)\r\n x = self.norm(x)\r\n x = self.dropout(x)\r\n x = self.linear(x)\r\n return x\r\n", "import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nimport graphgallery as gg\r\nfrom graphgallery import functional as gf\r\nfrom graphgallery.utils import tqdm\r\nfrom graphgallery.attack.untargeted import PyTorch\r\nfrom graphgallery.attack.untargeted.untargeted_attacker import UntargetedAttacker\r\n\r\n\r\[email protected]()\r\nclass PGD(UntargetedAttacker):\r\n \"\"\"PGD cannot ensure that there is not singleton after attack.\r\n https://github.com/KaidiXu/GCN_ADV_Train\r\n \"\"\"\r\n\r\n def process(self,\r\n surrogate,\r\n train_nodes,\r\n unlabeled_nodes=None,\r\n reset=True):\r\n assert isinstance(surrogate, gg.gallery.nodeclas.GCN), surrogate\r\n\r\n # poisoning attack in DeepRobust\r\n if unlabeled_nodes is None:\r\n victim_nodes = gf.asarray(train_nodes)\r\n victim_labels = self.graph.node_label[victim_nodes]\r\n else: # Evasion attack in original paper\r\n self_training_labels = self.estimate_self_training_labels(surrogate, unlabeled_nodes)\r\n victim_nodes = np.hstack([train_nodes, unlabeled_nodes])\r\n victim_labels = np.hstack([self.graph.node_label[train_nodes], self_training_labels])\r\n\r\n adj_tensor = gf.astensor(self.graph.adj_matrix.A, device=self.device)\r\n self.victim_nodes = gf.astensor(victim_nodes, device=self.device)\r\n self.victim_labels = gf.astensor(victim_labels, device=self.device)\r\n self.adj_tensor = adj_tensor\r\n self.x_tensor = gf.astensor(self.graph.node_attr, device=self.device)\r\n self.complementary = (torch.ones_like(adj_tensor) - torch.eye(self.num_nodes).to(self.device) - 2. 
* adj_tensor)\r\n self.loss_fn = nn.CrossEntropyLoss()\r\n self.adj_changes = nn.Parameter(torch.zeros_like(self.adj_tensor))\r\n self.surrogate = surrogate.model.to(self.device)\r\n self.surrogate.eval()\r\n\r\n # # used for `CW_loss=True`\r\n self.label_matrix = torch.eye(self.num_classes)[self.victim_labels].to(self.device)\r\n self.range_idx = torch.arange(victim_nodes.size).to(self.device)\r\n self.indices_real = torch.stack([self.range_idx, self.victim_labels])\r\n if reset:\r\n self.reset()\r\n return self\r\n\r\n def reset(self):\r\n super().reset()\r\n self.adj_changes.data.zero_()\r\n return self\r\n\r\n def estimate_self_training_labels(self, surrogate, victim_nodes):\r\n self_training_labels = surrogate.predict(victim_nodes).argmax(1)\r\n return self_training_labels.astype(self.intx, copy=False)\r\n\r\n def attack(self,\r\n num_budgets=0.05,\r\n sample_epochs=20,\r\n C=None,\r\n CW_loss=False,\r\n epochs=200,\r\n structure_attack=True,\r\n feature_attack=False,\r\n disable=False):\r\n\r\n super().attack(num_budgets, structure_attack, feature_attack)\r\n\r\n self.CW_loss = CW_loss\r\n if not C:\r\n if CW_loss:\r\n C = 0.1\r\n else:\r\n C = 200\r\n\r\n for epoch in tqdm(range(epochs),\r\n desc='PGD Training',\r\n disable=disable):\r\n gradients = self.compute_gradients(self.victim_nodes)\r\n lr = C / np.sqrt(epoch + 1)\r\n self.adj_changes.data.add_(lr * gradients)\r\n self.projection()\r\n\r\n best_s = self.random_sample(sample_epochs, disable=disable)\r\n self.adj_flips = np.transpose(np.where(best_s > 0.))\r\n return self\r\n\r\n def compute_gradients(self, victim_nodes):\r\n loss = self.compute_loss(victim_nodes)\r\n\r\n gradients = torch.autograd.grad(loss, self.adj_changes)\r\n return gradients[0]\r\n\r\n def compute_loss(self, victim_nodes):\r\n adj = self.get_perturbed_adj()\r\n adj_norm = gf.normalize_adj_tensor(adj)\r\n logit = self.surrogate(self.x_tensor, adj_norm)[victim_nodes]\r\n\r\n if self.CW_loss:\r\n logit = F.log_softmax(logit, dim=1)\r\n best_wrong_class = (logit - 1000 * self.label_matrix).argmax(1)\r\n indices_attack = torch.stack([self.range_idx, best_wrong_class])\r\n margin = logit[self.indices_real] - logit[indices_attack] + 0.2\r\n loss = -torch.clamp(margin, min=0.)\r\n return loss.mean()\r\n else:\r\n loss = self.loss_fn(logit, self.victim_labels)\r\n\r\n return loss\r\n\r\n def get_perturbed_adj(self):\r\n adj_triu = torch.triu(self.adj_changes, diagonal=1)\r\n adj_changes = adj_triu + adj_triu.t()\r\n adj = self.complementary * adj_changes + self.adj_tensor\r\n return adj\r\n\r\n def projection(self):\r\n clipped_matrix = self.clip(self.adj_changes)\r\n num_modified = clipped_matrix.sum()\r\n\r\n if num_modified > self.num_budgets:\r\n left = (self.adj_changes - 1.).min()\r\n right = self.adj_changes.max()\r\n miu = self.bisection(left, right, epsilon=1e-5)\r\n clipped_matrix = self.clip(self.adj_changes - miu)\r\n else:\r\n pass\r\n\r\n self.adj_changes.data.copy_(clipped_matrix)\r\n\r\n def bisection(self, a, b, epsilon):\r\n def func(x):\r\n clipped_matrix = self.clip(self.adj_changes - x)\r\n return clipped_matrix.sum() - self.num_budgets\r\n\r\n miu = a\r\n while (b - a) > epsilon:\r\n miu = (a + b) / 2\r\n # Check if middle point is root\r\n if func(miu) == 0:\r\n break\r\n # Decide the side to repeat the steps\r\n if func(miu) * func(a) < 0:\r\n b = miu\r\n else:\r\n a = miu\r\n return miu\r\n\r\n def clip(self, matrix):\r\n clipped_matrix = torch.clamp(matrix, 0., 1.)\r\n return clipped_matrix\r\n\r\n def random_sample(self, 
sample_epochs=20, disable=False):\r\n best_loss = -10000\r\n best_s = None\r\n s = torch.triu(self.adj_changes, diagonal=1)\r\n _one = torch.tensor(1.).to(self.device)\r\n _zero = torch.tensor(0.).to(self.device)\r\n for it in tqdm(range(sample_epochs),\r\n desc='Random Sampling',\r\n disable=disable):\r\n random_matrix = torch.zeros_like(s).uniform_(0, 1)\r\n sampled = torch.where(s > random_matrix, _one, _zero)\r\n if sampled.sum() > self.num_budgets:\r\n continue\r\n\r\n self.adj_changes.data.copy_(sampled)\r\n loss = self.compute_loss(self.victim_nodes)\r\n\r\n if best_loss < loss:\r\n best_loss = loss\r\n best_s = sampled\r\n\r\n assert best_s is not None, \"Something wrong\"\r\n return best_s.detach().cpu().numpy()\r\n", "import tensorflow as tf\r\nimport numpy as np\r\nimport scipy.sparse as sp\r\nimport graphgallery as gg\r\n\r\nfrom typing import Any\r\nfrom graphgallery import functional as gf\r\nfrom .ops import sparse_adj_to_sparse_tensor\r\n\r\n_TYPE = {\r\n 'float16': tf.float16,\r\n 'float32': tf.float32,\r\n 'float64': tf.float64,\r\n 'uint8': tf.uint8,\r\n 'int8': tf.int8,\r\n 'int16': tf.int16,\r\n 'int32': tf.int32,\r\n 'int64': tf.int64,\r\n 'bool': tf.bool\r\n}\r\n\r\n\r\ndef data_type_dict():\r\n return _TYPE\r\n\r\n\r\ndef is_sparse(x: Any) -> bool:\r\n return tf.keras.backend.is_sparse(x)\r\n\r\n\r\ndef is_dense(x: Any) -> bool:\r\n # is 'RaggedTensor' a dense tensor?\r\n return any((isinstance(x, tf.Tensor), isinstance(x, tf.Variable),\r\n isinstance(x, tf.RaggedTensor)))\r\n\r\n\r\ndef is_tensor(x: Any) -> bool:\r\n return is_dense(x) or is_sparse(x)\r\n\r\n\r\ndef astensor(x, *, dtype=None, device=None, escape=None):\r\n\r\n try:\r\n if x is None or (escape is not None and isinstance(x, escape)):\r\n return x\r\n except TypeError:\r\n raise TypeError(f\"argument 'escape' must be a type or tuple of types.\")\r\n\r\n # update: accept `dict` instance\r\n if isinstance(x, dict):\r\n for k, v in x.items():\r\n try:\r\n x[k] = astensor(v, dtype=dtype, device=device, escape=escape)\r\n except TypeError:\r\n pass\r\n return x\r\n\r\n if dtype is None:\r\n dtype = gf.infer_type(x)\r\n elif isinstance(dtype, tf.dtypes.DType):\r\n dtype = dtype.name\r\n elif isinstance(dtype, (np.dtype, str)):\r\n dtype = str(dtype)\r\n else:\r\n raise TypeError(\r\n f\"argument 'dtype' must be tf.dtypes.DType, np.dtype or str, but got {type(dtype)}.\"\r\n )\r\n\r\n with tf.device(device):\r\n if is_tensor(x):\r\n if x.dtype != dtype:\r\n return tf.cast(x, dtype=dtype)\r\n return tf.identity(x)\r\n elif gf.is_tensor(x, backend='torch'):\r\n return astensor(gf.tensoras(x),\r\n dtype=dtype,\r\n device=device,\r\n escape=escape)\r\n elif sp.isspmatrix(x):\r\n return sparse_adj_to_sparse_tensor(x, dtype=dtype)\r\n elif any((isinstance(x, (np.ndarray, np.matrix)), gg.is_listlike(x),\r\n gg.is_scalar(x))):\r\n return tf.convert_to_tensor(x, dtype=dtype)\r\n else:\r\n raise TypeError(\r\n f\"Invalid type of inputs. Allowed data type(Tensor, SparseTensor, Numpy array, Scipy sparse matrix, None), but got {type(x)}.\"\r\n )\r\n" ]
[ [ "tensorflow.keras.backend.learning_phase", "tensorflow.SparseTensor", "tensorflow.keras.layers.Dropout", "tensorflow.keras.backend.is_sparse", "tensorflow.nn.dropout" ], [ "numpy.hstack" ], [ "tensorflow.device", "tensorflow.reduce_sum", "tensorflow.equal", "tensorflow.cast", "tensorflow.keras.activations.relu", "tensorflow.gather", "tensorflow.argmax", "tensorflow.tile", "tensorflow.zeros_like", "tensorflow.keras.initializers.glorot_uniform", "tensorflow.GradientTape", "tensorflow.clip_by_value", "tensorflow.reduce_max", "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.linalg.band_part", "tensorflow.reshape", "tensorflow.keras.initializers.zeros", "tensorflow.reduce_min", "tensorflow.keras.optimizers.Adam" ], [ "torch.nn.CrossEntropyLoss", "torch.nn.Linear", "torch.nn.Dropout" ], [ "numpy.hstack", "torch.nn.CrossEntropyLoss", "torch.ones_like", "numpy.sqrt", "torch.nn.functional.log_softmax", "torch.zeros_like", "torch.eye", "torch.tensor", "torch.where", "torch.arange", "torch.stack", "torch.triu", "torch.clamp", "torch.autograd.grad", "numpy.where" ], [ "scipy.sparse.isspmatrix", "tensorflow.convert_to_tensor", "tensorflow.device", "tensorflow.cast", "tensorflow.identity", "tensorflow.keras.backend.is_sparse" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [ "1.10" ] } ]
ammaryasirnaich/mmdetection3d
[ "5e549546abbb2a7b43aab59e40e87599f61dcc4a", "5e549546abbb2a7b43aab59e40e87599f61dcc4a", "5e549546abbb2a7b43aab59e40e87599f61dcc4a", "5e549546abbb2a7b43aab59e40e87599f61dcc4a", "5e549546abbb2a7b43aab59e40e87599f61dcc4a", "5e549546abbb2a7b43aab59e40e87599f61dcc4a", "5e549546abbb2a7b43aab59e40e87599f61dcc4a" ]
[ "tests/test_data/test_datasets/test_kitti_dataset.py", "tests/test_data/test_datasets/test_nuscene_dataset.py", "IntensityNet_testing/train.py", "mmdet3d/models/detectors/h3dnet.py", "mmdet3d/ops/sparse_block.py", "tools/data_converter/scannet_data_utils.py", "mmdet3d/datasets/nuscenes_dataset.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport os\nimport tempfile\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom mmdet3d.core.bbox import LiDARInstance3DBoxes, limit_period\nfrom mmdet3d.datasets import KittiDataset\n\n\ndef _generate_kitti_dataset_config():\n data_root = 'tests/data/kitti'\n ann_file = 'tests/data/kitti/kitti_infos_train.pkl'\n classes = ['Pedestrian', 'Cyclist', 'Car']\n pts_prefix = 'velodyne_reduced'\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(\n type='MultiScaleFlipAug3D',\n img_scale=(1333, 800),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0, 0],\n scale_ratio_range=[1.0, 1.0],\n translation_std=[0, 0, 0]),\n dict(type='RandomFlip3D'),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=classes,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ])\n ]\n modality = dict(use_lidar=True, use_camera=False)\n split = 'training'\n return data_root, ann_file, classes, pts_prefix, pipeline, modality, split\n\n\ndef _generate_kitti_multi_modality_dataset_config():\n data_root = 'tests/data/kitti'\n ann_file = 'tests/data/kitti/kitti_infos_train.pkl'\n classes = ['Pedestrian', 'Cyclist', 'Car']\n pts_prefix = 'velodyne_reduced'\n img_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug3D',\n img_scale=(1333, 800),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n dict(type='Resize', multiscale_mode='value', keep_ratio=True),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0, 0],\n scale_ratio_range=[1., 1.],\n translation_std=[0, 0, 0]),\n dict(type='RandomFlip3D'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=classes,\n with_label=False),\n dict(type='Collect3D', keys=['points', 'img'])\n ])\n ]\n modality = dict(use_lidar=True, use_camera=True)\n split = 'training'\n return data_root, ann_file, classes, pts_prefix, pipeline, modality, split\n\n\ndef test_getitem():\n np.random.seed(0)\n data_root, ann_file, classes, pts_prefix, \\\n _, modality, split = _generate_kitti_dataset_config()\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=True,\n with_label_3d=True,\n file_client_args=dict(backend='disk')),\n dict(\n type='ObjectSample',\n db_sampler=dict(\n data_root='tests/data/kitti/',\n # in coordinate system refactor, this test file is modified\n info_path='tests/data/kitti/kitti_dbinfos_train.pkl',\n rate=1.0,\n prepare=dict(\n filter_by_difficulty=[-1],\n filter_by_min_points=dict(Pedestrian=10)),\n classes=['Pedestrian', 'Cyclist', 'Car'],\n sample_groups=dict(Pedestrian=6))),\n dict(\n type='ObjectNoise',\n num_try=100,\n translation_std=[1.0, 1.0, 0.5],\n global_rot_range=[0.0, 0.0],\n rot_range=[-0.78539816, 0.78539816]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(\n 
type='GlobalRotScaleTrans',\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05]),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='ObjectRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(type='PointShuffle'),\n dict(\n type='DefaultFormatBundle3D',\n class_names=['Pedestrian', 'Cyclist', 'Car']),\n dict(\n type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])\n ]\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n data = kitti_dataset[0]\n points = data['points']._data\n gt_bboxes_3d = data['gt_bboxes_3d']._data\n gt_labels_3d = data['gt_labels_3d']._data\n expected_gt_bboxes_3d = torch.tensor(\n [[9.5081, -5.2269, -1.1370, 1.2288, 0.4915, 1.9353, 1.9988]])\n expected_gt_labels_3d = torch.tensor([0])\n rot_matrix = data['img_metas']._data['pcd_rotation']\n rot_angle = data['img_metas']._data['pcd_rotation_angle']\n horizontal_flip = data['img_metas']._data['pcd_horizontal_flip']\n vertical_flip = data['img_metas']._data['pcd_vertical_flip']\n expected_rot_matrix = torch.tensor([[0.8018, 0.5976, 0.0000],\n [-0.5976, 0.8018, 0.0000],\n [0.0000, 0.0000, 1.0000]])\n expected_rot_angle = 0.6404654291602163\n noise_angle = 0.20247319\n assert torch.allclose(expected_rot_matrix, rot_matrix, atol=1e-4)\n assert math.isclose(expected_rot_angle, rot_angle, abs_tol=1e-4)\n assert horizontal_flip is True\n assert vertical_flip is False\n\n # after coord system refactor\n expected_gt_bboxes_3d[:, :3] = \\\n expected_gt_bboxes_3d[:, :3] @ rot_matrix @ rot_matrix\n expected_gt_bboxes_3d[:, -1:] = -np.pi - expected_gt_bboxes_3d[:, -1:] \\\n + 2 * rot_angle - 2 * noise_angle\n expected_gt_bboxes_3d[:, -1:] = limit_period(\n expected_gt_bboxes_3d[:, -1:], period=np.pi * 2)\n assert points.shape == (780, 4)\n assert torch.allclose(\n gt_bboxes_3d.tensor, expected_gt_bboxes_3d, atol=1e-4)\n assert torch.all(gt_labels_3d == expected_gt_labels_3d)\n\n # test multi-modality KITTI dataset\n np.random.seed(0)\n point_cloud_range = [0, -40, -3, 70.4, 40, 1]\n img_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\n multi_modality_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4),\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),\n dict(\n type='Resize',\n img_scale=[(640, 192), (2560, 768)],\n multiscale_mode='range',\n keep_ratio=True),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05],\n translation_std=[0.2, 0.2, 0.2]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='PointShuffle'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle3D', class_names=classes),\n dict(\n type='Collect3D',\n keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']),\n ]\n modality = dict(use_lidar=True, use_camera=True)\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n multi_modality_pipeline, classes, modality)\n data = kitti_dataset[0]\n img = data['img']._data\n lidar2img = data['img_metas']._data['lidar2img']\n\n expected_lidar2img = np.array(\n [[6.02943726e+02, -7.07913330e+02, -1.22748432e+01, -1.70942719e+02],\n [1.76777252e+02, 
8.80879879e+00, -7.07936157e+02, -1.02568634e+02],\n [9.99984801e-01, -1.52826728e-03, -5.29071223e-03, -3.27567995e-01],\n [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])\n\n assert img.shape[:] == (3, 416, 1344)\n assert np.allclose(lidar2img, expected_lidar2img)\n\n\ndef test_evaluate():\n if not torch.cuda.is_available():\n pytest.skip('test requires GPU and torch+cuda')\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n boxes_3d = LiDARInstance3DBoxes(\n torch.tensor(\n [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))\n labels_3d = torch.tensor([\n 0,\n ])\n scores_3d = torch.tensor([0.5])\n metric = ['mAP']\n result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)\n ap_dict = kitti_dataset.evaluate([result], metric)\n assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_easy'],\n 3.0303030303030307)\n assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_moderate'],\n 3.0303030303030307)\n assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_hard'],\n 3.0303030303030307)\n\n\ndef test_show():\n from os import path as osp\n\n import mmcv\n\n from mmdet3d.core.bbox import LiDARInstance3DBoxes\n tmp_dir = tempfile.TemporaryDirectory()\n temp_dir = tmp_dir.name\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(\n data_root, ann_file, split=split, modality=modality, pipeline=pipeline)\n boxes_3d = LiDARInstance3DBoxes(\n torch.tensor(\n [[46.1218, -4.6496, -0.9275, 0.5316, 1.4442, 1.7450, 1.1749],\n [33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723],\n [46.1366, -4.6404, -0.9510, 0.5162, 1.6501, 1.7540, 1.3778],\n [33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.5430],\n [58.9079, 16.6272, -1.5829, 1.5656, 3.9313, 1.4899, 1.5505]]))\n scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.2780])\n labels_3d = torch.tensor([0, 0, 1, 1, 2])\n result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)\n results = [result]\n kitti_dataset.show(results, temp_dir, show=False)\n pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')\n gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')\n pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')\n mmcv.check_file_exist(pts_file_path)\n mmcv.check_file_exist(gt_file_path)\n mmcv.check_file_exist(pred_file_path)\n tmp_dir.cleanup()\n\n # test show with pipeline\n eval_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4),\n dict(\n type='DefaultFormatBundle3D',\n class_names=classes,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n tmp_dir = tempfile.TemporaryDirectory()\n temp_dir = tmp_dir.name\n kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)\n pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')\n gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')\n pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')\n mmcv.check_file_exist(pts_file_path)\n mmcv.check_file_exist(gt_file_path)\n mmcv.check_file_exist(pred_file_path)\n tmp_dir.cleanup()\n\n # test multi-modality show\n tmp_dir = tempfile.TemporaryDirectory()\n temp_dir = tmp_dir.name\n _, _, _, _, multi_modality_pipeline, modality, _ = \\\n _generate_kitti_multi_modality_dataset_config()\n kitti_dataset = 
KittiDataset(data_root, ann_file, split, pts_prefix,\n multi_modality_pipeline, classes, modality)\n kitti_dataset.show(results, temp_dir, show=False)\n pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')\n gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')\n pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')\n img_file_path = osp.join(temp_dir, '000000', '000000_img.png')\n img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')\n img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')\n mmcv.check_file_exist(pts_file_path)\n mmcv.check_file_exist(gt_file_path)\n mmcv.check_file_exist(pred_file_path)\n mmcv.check_file_exist(img_file_path)\n mmcv.check_file_exist(img_pred_path)\n mmcv.check_file_exist(img_gt_file)\n tmp_dir.cleanup()\n\n # test multi-modality show with pipeline\n eval_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4),\n dict(type='LoadImageFromFile'),\n dict(\n type='DefaultFormatBundle3D',\n class_names=classes,\n with_label=False),\n dict(type='Collect3D', keys=['points', 'img'])\n ]\n tmp_dir = tempfile.TemporaryDirectory()\n temp_dir = tmp_dir.name\n kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)\n pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')\n gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')\n pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')\n img_file_path = osp.join(temp_dir, '000000', '000000_img.png')\n img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')\n img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')\n mmcv.check_file_exist(pts_file_path)\n mmcv.check_file_exist(gt_file_path)\n mmcv.check_file_exist(pred_file_path)\n mmcv.check_file_exist(img_file_path)\n mmcv.check_file_exist(img_pred_path)\n mmcv.check_file_exist(img_gt_file)\n tmp_dir.cleanup()\n\n\ndef test_format_results():\n from mmdet3d.core.bbox import LiDARInstance3DBoxes\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n # coord system refactor\n boxes_3d = LiDARInstance3DBoxes(\n torch.tensor(\n [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))\n labels_3d = torch.tensor([\n 0,\n ])\n scores_3d = torch.tensor([0.5])\n result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)\n results = [result]\n result_files, tmp_dir = kitti_dataset.format_results(results)\n expected_name = np.array(['Pedestrian'])\n expected_truncated = np.array([0.])\n expected_occluded = np.array([0])\n # coord sys refactor\n expected_alpha = np.array(-3.3410306 + np.pi)\n expected_bbox = np.array([[710.443, 144.00221, 820.29114, 307.58667]])\n expected_dimensions = np.array([[1.2, 1.89, 0.48]])\n expected_location = np.array([[1.8399826, 1.4700007, 8.410018]])\n expected_rotation_y = np.array([0.0100])\n expected_score = np.array([0.5])\n expected_sample_idx = np.array([0])\n assert np.all(result_files[0]['name'] == expected_name)\n assert np.allclose(result_files[0]['truncated'], expected_truncated)\n assert np.all(result_files[0]['occluded'] == expected_occluded)\n assert np.allclose(result_files[0]['alpha'], expected_alpha, 1e-3)\n assert np.allclose(result_files[0]['bbox'], expected_bbox)\n assert np.allclose(result_files[0]['dimensions'], expected_dimensions)\n assert np.allclose(result_files[0]['location'], expected_location)\n assert 
np.allclose(result_files[0]['rotation_y'], expected_rotation_y,\n 1e-3)\n assert np.allclose(result_files[0]['score'], expected_score)\n assert np.allclose(result_files[0]['sample_idx'], expected_sample_idx)\n tmp_dir.cleanup()\n\n\ndef test_bbox2result_kitti():\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n boxes_3d = LiDARInstance3DBoxes(\n torch.tensor(\n [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))\n labels_3d = torch.tensor([\n 0,\n ])\n scores_3d = torch.tensor([0.5])\n result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)\n results = [result]\n tmp_dir = tempfile.TemporaryDirectory()\n temp_kitti_result_dir = tmp_dir.name\n det_annos = kitti_dataset.bbox2result_kitti(\n results, classes, submission_prefix=temp_kitti_result_dir)\n expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')\n expected_name = np.array(['Pedestrian'])\n expected_dimensions = np.array([1.2000, 1.8900, 0.4800])\n # coord system refactor (reverse sign)\n expected_rotation_y = 0.0100\n expected_score = np.array([0.5])\n assert np.all(det_annos[0]['name'] == expected_name)\n assert np.allclose(det_annos[0]['rotation_y'], expected_rotation_y, 1e-3)\n assert np.allclose(det_annos[0]['score'], expected_score)\n assert np.allclose(det_annos[0]['dimensions'], expected_dimensions)\n assert os.path.exists(expected_file_path)\n tmp_dir.cleanup()\n\n tmp_dir = tempfile.TemporaryDirectory()\n temp_kitti_result_dir = tmp_dir.name\n boxes_3d = LiDARInstance3DBoxes(torch.tensor([]))\n labels_3d = torch.tensor([])\n scores_3d = torch.tensor([])\n empty_result = dict(\n boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)\n results = [empty_result]\n det_annos = kitti_dataset.bbox2result_kitti(\n results, classes, submission_prefix=temp_kitti_result_dir)\n expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')\n assert os.path.exists(expected_file_path)\n tmp_dir.cleanup()\n\n\ndef test_bbox2result_kitti2d():\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n bboxes = np.array([[[46.1218, -4.6496, -0.9275, 0.5316, 0.5],\n [33.3189, 0.1981, 0.3136, 0.5656, 0.5]],\n [[46.1366, -4.6404, -0.9510, 0.5162, 0.5],\n [33.2646, 0.2297, 0.3446, 0.5746, 0.5]]])\n det_annos = kitti_dataset.bbox2result_kitti2d([bboxes], classes)\n expected_name = np.array(\n ['Pedestrian', 'Pedestrian', 'Cyclist', 'Cyclist'])\n expected_bbox = np.array([[46.1218, -4.6496, -0.9275, 0.5316],\n [33.3189, 0.1981, 0.3136, 0.5656],\n [46.1366, -4.6404, -0.951, 0.5162],\n [33.2646, 0.2297, 0.3446, 0.5746]])\n expected_score = np.array([0.5, 0.5, 0.5, 0.5])\n assert np.all(det_annos[0]['name'] == expected_name)\n assert np.allclose(det_annos[0]['bbox'], expected_bbox)\n assert np.allclose(det_annos[0]['score'], expected_score)\n", "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport tempfile\n\nimport numpy as np\nimport torch\n\nfrom mmdet3d.datasets import NuScenesDataset\n\n\ndef test_getitem():\n np.random.seed(0)\n point_cloud_range = [-50, -50, -5, 50, 50, 3]\n file_client_args = dict(backend='disk')\n class_names = [\n 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',\n 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'\n ]\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=5,\n use_dim=5,\n file_client_args=file_client_args),\n dict(\n type='LoadPointsFromMultiSweeps',\n sweeps_num=2,\n file_client_args=file_client_args),\n dict(\n type='MultiScaleFlipAug3D',\n img_scale=(1333, 800),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0, 0],\n scale_ratio_range=[1., 1.],\n translation_std=[0, 0, 0]),\n dict(type='RandomFlip3D'),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=point_cloud_range),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ])\n ]\n\n nus_dataset = NuScenesDataset(\n 'tests/data/nuscenes/nus_info.pkl',\n pipeline,\n 'tests/data/nuscenes',\n test_mode=True)\n data = nus_dataset[0]\n assert data['img_metas'][0].data['flip'] is False\n assert data['img_metas'][0].data['pcd_horizontal_flip'] is False\n assert data['points'][0]._data.shape == (100, 4)\n\n data = nus_dataset[1]\n assert data['img_metas'][0].data['flip'] is False\n assert data['img_metas'][0].data['pcd_horizontal_flip'] is False\n assert data['points'][0]._data.shape == (597, 4)\n\n\ndef test_show():\n from os import path as osp\n\n import mmcv\n\n from mmdet3d.core.bbox import LiDARInstance3DBoxes\n tmp_dir = tempfile.TemporaryDirectory()\n temp_dir = tmp_dir.name\n class_names = [\n 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',\n 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'\n ]\n eval_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=5,\n use_dim=5,\n file_client_args=dict(backend='disk')),\n dict(\n type='LoadPointsFromMultiSweeps',\n sweeps_num=10,\n file_client_args=dict(backend='disk')),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n nus_dataset = NuScenesDataset('tests/data/nuscenes/nus_info.pkl', None,\n 'tests/data/nuscenes')\n boxes_3d = LiDARInstance3DBoxes(\n torch.tensor(\n [[46.1218, -4.6496, -0.9275, 0.5316, 1.4442, 1.7450, 1.1749],\n [33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723],\n [46.1366, -4.6404, -0.9510, 0.5162, 1.6501, 1.7540, 1.3778],\n [33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.5430],\n [58.9079, 16.6272, -1.5829, 1.5656, 3.9313, 1.4899, 1.5505]]))\n scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.2780])\n labels_3d = torch.tensor([0, 0, 1, 1, 2])\n result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)\n results = [dict(pts_bbox=result)]\n nus_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)\n file_name = 'n015-2018-08-02-17-16-37+0800__LIDAR_TOP__1533201470948018'\n pts_file_path = osp.join(temp_dir, file_name, f'{file_name}_points.obj')\n gt_file_path = osp.join(temp_dir, file_name, f'{file_name}_gt.obj')\n pred_file_path = osp.join(temp_dir, file_name, f'{file_name}_pred.obj')\n mmcv.check_file_exist(pts_file_path)\n mmcv.check_file_exist(gt_file_path)\n mmcv.check_file_exist(pred_file_path)\n 
tmp_dir.cleanup()\n", "# Copyright (c) OpenMMLab. All rights reserved.\nfrom __future__ import division\nimport argparse\nimport copy\nimport os\nimport time\nimport warnings\nfrom os import path as osp\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv import Config, DictAction\nfrom mmcv.runner import get_dist_info, init_dist\n\nfrom mmdet import __version__ as mmdet_version\nfrom mmdet3d import __version__ as mmdet3d_version\nfrom mmdet3d.apis import init_random_seed, train_model\nfrom mmdet3d.datasets import build_dataset\nfrom mmdet3d.models import build_model\nfrom mmdet3d.utils import collect_env, get_root_logger\nfrom mmdet.apis import set_random_seed\nfrom mmseg import __version__ as mmseg_version\n\ntry:\n # If mmdet version > 2.20.0, setup_multi_processes would be imported and\n # used from mmdet instead of mmdet3d.\n from mmdet.utils import setup_multi_processes\nexcept ImportError:\n from mmdet3d.utils import setup_multi_processes\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a detector')\n configpath = \"/workspace/mmdetection3d/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-car.py\"\n parser.add_argument('config', configpath, help='train config file path')\n parser.add_argument('--work-dir', help='the dir to save logs and models')\n parser.add_argument(\n '--resume-from', help='the checkpoint file to resume from')\n parser.add_argument(\n '--auto-resume',\n action='store_true',\n help='resume from the latest checkpoint automatically')\n parser.add_argument(\n '--no-validate',\n action='store_true',\n help='whether not to evaluate the checkpoint during training')\n group_gpus = parser.add_mutually_exclusive_group()\n group_gpus.add_argument(\n '--gpus',\n type=int,\n help='(Deprecated, please use --gpu-id) number of gpus to use '\n '(only applicable to non-distributed training)')\n group_gpus.add_argument(\n '--gpu-ids',\n type=int,\n nargs='+',\n help='(Deprecated, please use --gpu-id) ids of gpus to use '\n '(only applicable to non-distributed training)')\n group_gpus.add_argument(\n '--gpu-id',\n type=int,\n default=0,\n help='number of gpus to use '\n '(only applicable to non-distributed training)')\n parser.add_argument('--seed', type=int, default=0, help='random seed')\n parser.add_argument(\n '--diff-seed',\n action='store_true',\n help='Whether or not set different seeds for different ranks')\n parser.add_argument(\n '--deterministic',\n action='store_true',\n help='whether to set deterministic options for CUDNN backend.')\n parser.add_argument(\n '--options',\n nargs='+',\n action=DictAction,\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file (deprecate), '\n 'change to --cfg-options instead.')\n parser.add_argument(\n '--cfg-options',\n nargs='+',\n action=DictAction,\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file. If the value to '\n 'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n 'It also allows nested list/tuple values, e.g. 
key=\"[(a,b),(c,d)]\" '\n 'Note that the quotation marks are necessary and that no white space '\n 'is allowed.')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument(\n '--autoscale-lr',\n action='store_true',\n help='automatically scale lr with the number of gpus')\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n if args.options and args.cfg_options:\n raise ValueError(\n '--options and --cfg-options cannot be both specified, '\n '--options is deprecated in favor of --cfg-options')\n if args.options:\n warnings.warn('--options is deprecated in favor of --cfg-options')\n args.cfg_options = args.options\n\n return args\n\n\ndef main():\n args = parse_args()\n\n\n\n cfg = Config.fromfile(args.config)\n if args.cfg_options is not None:\n cfg.merge_from_dict(args.cfg_options)\n\n # set multi-process settings\n setup_multi_processes(cfg)\n\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n\n # work_dir is determined in this priority: CLI > segment in file > filename\n if args.work_dir is not None:\n # update configs according to CLI args if args.work_dir is not None\n cfg.work_dir = args.work_dir\n elif cfg.get('work_dir', None) is None:\n # use config filename as default work_dir if cfg.work_dir is None\n cfg.work_dir = osp.join('./work_dirs',\n osp.splitext(osp.basename(args.config))[0])\n if args.resume_from is not None:\n cfg.resume_from = args.resume_from\n\n if args.auto_resume:\n cfg.auto_resume = args.auto_resume\n warnings.warn('`--auto-resume` is only supported when mmdet'\n 'version >= 2.20.0 for 3D detection model or'\n 'mmsegmentation verision >= 0.21.0 for 3D'\n 'segmentation model')\n\n if args.gpus is not None:\n cfg.gpu_ids = range(1)\n warnings.warn('`--gpus` is deprecated because we only support '\n 'single GPU mode in non-distributed training. '\n 'Use `gpus=1` now.')\n if args.gpu_ids is not None:\n cfg.gpu_ids = args.gpu_ids[0:1]\n warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '\n 'Because we only support single GPU mode in '\n 'non-distributed training. 
Use the first GPU '\n 'in `gpu_ids` now.')\n if args.gpus is None and args.gpu_ids is None:\n cfg.gpu_ids = [args.gpu_id]\n\n if args.autoscale_lr:\n # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)\n cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n # re-set gpu_ids with distributed training mode\n _, world_size = get_dist_info()\n cfg.gpu_ids = range(world_size)\n\n # create work_dir\n mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))\n # dump config\n cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))\n # init the logger before other steps\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n log_file = osp.join(cfg.work_dir, f'{timestamp}.log')\n # specify logger name, if we still use 'mmdet', the output info will be\n # filtered and won't be saved in the log_file\n # TODO: ugly workaround to judge whether we are training det or seg model\n if cfg.model.type in ['EncoderDecoder3D']:\n logger_name = 'mmseg'\n else:\n logger_name = 'mmdet'\n logger = get_root_logger(\n log_file=log_file, log_level=cfg.log_level, name=logger_name)\n\n # init the meta dict to record some important information such as\n # environment info and seed, which will be logged\n meta = dict()\n # log env info\n env_info_dict = collect_env()\n env_info = '\\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])\n dash_line = '-' * 60 + '\\n'\n logger.info('Environment info:\\n' + dash_line + env_info + '\\n' +\n dash_line)\n meta['env_info'] = env_info\n meta['config'] = cfg.pretty_text\n\n # log some basic info\n logger.info(f'Distributed training: {distributed}')\n logger.info(f'Config:\\n{cfg.pretty_text}')\n\n # set random seeds\n seed = init_random_seed(args.seed)\n seed = seed + dist.get_rank() if args.diff_seed else seed\n logger.info(f'Set random seed to {seed}, '\n f'deterministic: {args.deterministic}')\n set_random_seed(seed, deterministic=args.deterministic)\n cfg.seed = seed\n meta['seed'] = seed\n meta['exp_name'] = osp.basename(args.config)\n\n model = build_model(\n cfg.model,\n train_cfg=cfg.get('train_cfg'),\n test_cfg=cfg.get('test_cfg'))\n model.init_weights()\n\n logger.info(f'Model:\\n{model}')\n datasets = [build_dataset(cfg.data.train)]\n if len(cfg.workflow) == 2:\n val_dataset = copy.deepcopy(cfg.data.val)\n # in case we use a dataset wrapper\n if 'dataset' in cfg.data.train:\n val_dataset.pipeline = cfg.data.train.dataset.pipeline\n else:\n val_dataset.pipeline = cfg.data.train.pipeline\n # set test_mode=False here in deep copied config\n # which do not affect AP/AR calculation later\n # refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow # noqa\n val_dataset.test_mode = False\n datasets.append(build_dataset(val_dataset))\n if cfg.checkpoint_config is not None:\n # save mmdet version, config file content and class names in\n # checkpoints as meta data\n cfg.checkpoint_config.meta = dict(\n mmdet_version=mmdet_version,\n mmseg_version=mmseg_version,\n mmdet3d_version=mmdet3d_version,\n config=cfg.pretty_text,\n CLASSES=datasets[0].CLASSES,\n PALETTE=datasets[0].PALETTE # for segmentors\n if hasattr(datasets[0], 'PALETTE') else None)\n # add an attribute for visualization convenience\n model.CLASSES = datasets[0].CLASSES\n train_model(\n model,\n datasets,\n cfg,\n 
distributed=distributed,\n validate=(not args.no_validate),\n timestamp=timestamp,\n meta=meta)\n\n\nif __name__ == '__main__':\n main()\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet3d.core import merge_aug_bboxes_3d\nfrom ..builder import DETECTORS\nfrom .two_stage import TwoStage3DDetector\n\n\[email protected]_module()\nclass H3DNet(TwoStage3DDetector):\n r\"\"\"H3DNet model.\n\n Please refer to the `paper <https://arxiv.org/abs/2006.05682>`_\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(H3DNet, self).__init__(\n backbone=backbone,\n neck=neck,\n rpn_head=rpn_head,\n roi_head=roi_head,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n pretrained=pretrained,\n init_cfg=init_cfg)\n\n def forward_train(self,\n points,\n img_metas,\n gt_bboxes_3d,\n gt_labels_3d,\n pts_semantic_mask=None,\n pts_instance_mask=None,\n gt_bboxes_ignore=None):\n \"\"\"Forward of training.\n\n Args:\n points (list[torch.Tensor]): Points of each batch.\n img_metas (list): Image metas.\n gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): gt bboxes of each batch.\n gt_labels_3d (list[torch.Tensor]): gt class labels of each batch.\n pts_semantic_mask (list[torch.Tensor]): point-wise semantic\n label of each batch.\n pts_instance_mask (list[torch.Tensor]): point-wise instance\n label of each batch.\n gt_bboxes_ignore (list[torch.Tensor]): Specify\n which bounding.\n\n Returns:\n dict: Losses.\n \"\"\"\n points_cat = torch.stack(points)\n\n feats_dict = self.extract_feat(points_cat)\n feats_dict['fp_xyz'] = [feats_dict['fp_xyz_net0'][-1]]\n feats_dict['fp_features'] = [feats_dict['hd_feature']]\n feats_dict['fp_indices'] = [feats_dict['fp_indices_net0'][-1]]\n\n losses = dict()\n if self.with_rpn:\n rpn_outs = self.rpn_head(feats_dict, self.train_cfg.rpn.sample_mod)\n feats_dict.update(rpn_outs)\n\n rpn_loss_inputs = (points, gt_bboxes_3d, gt_labels_3d,\n pts_semantic_mask, pts_instance_mask, img_metas)\n rpn_losses = self.rpn_head.loss(\n rpn_outs,\n *rpn_loss_inputs,\n gt_bboxes_ignore=gt_bboxes_ignore,\n ret_target=True)\n feats_dict['targets'] = rpn_losses.pop('targets')\n losses.update(rpn_losses)\n\n # Generate rpn proposals\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n proposal_inputs = (points, rpn_outs, img_metas)\n proposal_list = self.rpn_head.get_bboxes(\n *proposal_inputs, use_nms=proposal_cfg.use_nms)\n feats_dict['proposal_list'] = proposal_list\n else:\n raise NotImplementedError\n\n roi_losses = self.roi_head.forward_train(feats_dict, img_metas, points,\n gt_bboxes_3d, gt_labels_3d,\n pts_semantic_mask,\n pts_instance_mask,\n gt_bboxes_ignore)\n losses.update(roi_losses)\n\n return losses\n\n def simple_test(self, points, img_metas, imgs=None, rescale=False):\n \"\"\"Forward of testing.\n\n Args:\n points (list[torch.Tensor]): Points of each sample.\n img_metas (list): Image metas.\n rescale (bool): Whether to rescale results.\n\n Returns:\n list: Predicted 3d boxes.\n \"\"\"\n points_cat = torch.stack(points)\n\n feats_dict = self.extract_feat(points_cat)\n feats_dict['fp_xyz'] = [feats_dict['fp_xyz_net0'][-1]]\n feats_dict['fp_features'] = [feats_dict['hd_feature']]\n feats_dict['fp_indices'] = [feats_dict['fp_indices_net0'][-1]]\n\n if self.with_rpn:\n proposal_cfg = self.test_cfg.rpn\n rpn_outs = self.rpn_head(feats_dict, proposal_cfg.sample_mod)\n feats_dict.update(rpn_outs)\n # Generate rpn proposals\n proposal_list = 
self.rpn_head.get_bboxes(\n points, rpn_outs, img_metas, use_nms=proposal_cfg.use_nms)\n feats_dict['proposal_list'] = proposal_list\n else:\n raise NotImplementedError\n\n return self.roi_head.simple_test(\n feats_dict, img_metas, points_cat, rescale=rescale)\n\n def aug_test(self, points, img_metas, imgs=None, rescale=False):\n \"\"\"Test with augmentation.\"\"\"\n points_cat = [torch.stack(pts) for pts in points]\n feats_dict = self.extract_feats(points_cat, img_metas)\n for feat_dict in feats_dict:\n feat_dict['fp_xyz'] = [feat_dict['fp_xyz_net0'][-1]]\n feat_dict['fp_features'] = [feat_dict['hd_feature']]\n feat_dict['fp_indices'] = [feat_dict['fp_indices_net0'][-1]]\n\n # only support aug_test for one sample\n aug_bboxes = []\n for feat_dict, pts_cat, img_meta in zip(feats_dict, points_cat,\n img_metas):\n if self.with_rpn:\n proposal_cfg = self.test_cfg.rpn\n rpn_outs = self.rpn_head(feat_dict, proposal_cfg.sample_mod)\n feat_dict.update(rpn_outs)\n # Generate rpn proposals\n proposal_list = self.rpn_head.get_bboxes(\n points, rpn_outs, img_metas, use_nms=proposal_cfg.use_nms)\n feat_dict['proposal_list'] = proposal_list\n else:\n raise NotImplementedError\n\n bbox_results = self.roi_head.simple_test(\n feat_dict,\n self.test_cfg.rcnn.sample_mod,\n img_meta,\n pts_cat,\n rescale=rescale)\n aug_bboxes.append(bbox_results)\n\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes = merge_aug_bboxes_3d(aug_bboxes, img_metas,\n self.bbox_head.test_cfg)\n\n return [merged_bboxes]\n\n def extract_feats(self, points, img_metas):\n \"\"\"Extract features of multiple samples.\"\"\"\n return [\n self.extract_feat(pts, img_meta)\n for pts, img_meta in zip(points, img_metas)\n ]\n", "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom torch import nn\n\nfrom mmdet.models.backbones.resnet import BasicBlock, Bottleneck\nfrom .spconv import IS_SPCONV2_AVAILABLE\n\nif IS_SPCONV2_AVAILABLE:\n from spconv.pytorch import SparseModule, SparseSequential\nelse:\n from mmcv.ops import SparseModule, SparseSequential\n\n\ndef replace_feature(out, new_features):\n if 'replace_feature' in out.__dir__():\n # spconv 2.x behaviour\n return out.replace_feature(new_features)\n else:\n out.features = new_features\n return out\n\n\nclass SparseBottleneck(Bottleneck, SparseModule):\n \"\"\"Sparse bottleneck block for PartA^2.\n\n Bottleneck block implemented with submanifold sparse convolution.\n\n Args:\n inplanes (int): inplanes of block.\n planes (int): planes of block.\n stride (int, optional): stride of the first block. Default: 1.\n downsample (Module, optional): down sample module for block.\n conv_cfg (dict, optional): dictionary to construct and config conv\n layer. Default: None.\n norm_cfg (dict, optional): dictionary to construct and config norm\n layer. 
Default: dict(type='BN').\n \"\"\"\n\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n downsample=None,\n conv_cfg=None,\n norm_cfg=None):\n\n SparseModule.__init__(self)\n Bottleneck.__init__(\n self,\n inplanes,\n planes,\n stride=stride,\n downsample=downsample,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg)\n\n def forward(self, x):\n identity = x.features\n\n out = self.conv1(x)\n out = replace_feature(out, self.bn1(out.features))\n out = replace_feature(out, self.relu(out.features))\n\n out = self.conv2(out)\n out = replace_feature(out, self.bn2(out.features))\n out = replace_feature(out, self.relu(out.features))\n\n out = self.conv3(out)\n out = replace_feature(out, self.bn3(out.features))\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out = replace_feature(out, out.features + identity)\n out = replace_feature(out, self.relu(out.features))\n\n return out\n\n\nclass SparseBasicBlock(BasicBlock, SparseModule):\n \"\"\"Sparse basic block for PartA^2.\n\n Sparse basic block implemented with submanifold sparse convolution.\n\n Args:\n inplanes (int): inplanes of block.\n planes (int): planes of block.\n stride (int, optional): stride of the first block. Default: 1.\n downsample (Module, optional): down sample module for block.\n conv_cfg (dict, optional): dictionary to construct and config conv\n layer. Default: None.\n norm_cfg (dict, optional): dictionary to construct and config norm\n layer. Default: dict(type='BN').\n \"\"\"\n\n expansion = 1\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n downsample=None,\n conv_cfg=None,\n norm_cfg=None):\n SparseModule.__init__(self)\n BasicBlock.__init__(\n self,\n inplanes,\n planes,\n stride=stride,\n downsample=downsample,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg)\n\n def forward(self, x):\n identity = x.features\n\n assert x.features.dim() == 2, f'x.features.dim()={x.features.dim()}'\n out = self.conv1(x)\n out = replace_feature(out, self.norm1(out.features))\n out = replace_feature(out, self.relu(out.features))\n\n out = self.conv2(out)\n out = replace_feature(out, self.norm2(out.features))\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out = replace_feature(out, out.features + identity)\n out = replace_feature(out, self.relu(out.features))\n\n return out\n\n\ndef make_sparse_convmodule(in_channels,\n out_channels,\n kernel_size,\n indice_key,\n stride=1,\n padding=0,\n conv_type='SubMConv3d',\n norm_cfg=None,\n order=('conv', 'norm', 'act')):\n \"\"\"Make sparse convolution module.\n\n Args:\n in_channels (int): the number of input channels\n out_channels (int): the number of out channels\n kernel_size (int|tuple(int)): kernel size of convolution\n indice_key (str): the indice key used for sparse tensor\n stride (int|tuple(int)): the stride of convolution\n padding (int or list[int]): the padding number of input\n conv_type (str): sparse conv type in spconv\n norm_cfg (dict[str]): config of normalization layer\n order (tuple[str]): The order of conv/norm/activation layers. It is a\n sequence of \"conv\", \"norm\" and \"act\". 
Common examples are\n (\"conv\", \"norm\", \"act\") and (\"act\", \"conv\", \"norm\").\n\n Returns:\n spconv.SparseSequential: sparse convolution module.\n \"\"\"\n assert isinstance(order, tuple) and len(order) <= 3\n assert set(order) | {'conv', 'norm', 'act'} == {'conv', 'norm', 'act'}\n\n conv_cfg = dict(type=conv_type, indice_key=indice_key)\n\n layers = list()\n for layer in order:\n if layer == 'conv':\n if conv_type not in [\n 'SparseInverseConv3d', 'SparseInverseConv2d',\n 'SparseInverseConv1d'\n ]:\n layers.append(\n build_conv_layer(\n conv_cfg,\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n bias=False))\n else:\n layers.append(\n build_conv_layer(\n conv_cfg,\n in_channels,\n out_channels,\n kernel_size,\n bias=False))\n elif layer == 'norm':\n layers.append(build_norm_layer(norm_cfg, out_channels)[1])\n elif layer == 'act':\n layers.append(nn.ReLU(inplace=True))\n\n layers = SparseSequential(*layers)\n return layers\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nfrom concurrent import futures as futures\nfrom os import path as osp\n\nimport mmcv\nimport numpy as np\n\n\nclass ScanNetData(object):\n \"\"\"ScanNet data.\n\n Generate scannet infos for scannet_converter.\n\n Args:\n root_path (str): Root path of the raw data.\n split (str, optional): Set split type of the data. Default: 'train'.\n \"\"\"\n\n def __init__(self, root_path, split='train'):\n self.root_dir = root_path\n self.split = split\n self.split_dir = osp.join(root_path)\n self.classes = [\n 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',\n 'garbagebin'\n ]\n self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}\n self.label2cat = {self.cat2label[t]: t for t in self.cat2label}\n self.cat_ids = np.array(\n [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])\n self.cat_ids2class = {\n nyu40id: i\n for i, nyu40id in enumerate(list(self.cat_ids))\n }\n assert split in ['train', 'val', 'test']\n split_file = osp.join(self.root_dir, 'meta_data',\n f'scannetv2_{split}.txt')\n mmcv.check_file_exist(split_file)\n self.sample_id_list = mmcv.list_from_file(split_file)\n self.test_mode = (split == 'test')\n\n def __len__(self):\n return len(self.sample_id_list)\n\n def get_aligned_box_label(self, idx):\n box_file = osp.join(self.root_dir, 'scannet_instance_data',\n f'{idx}_aligned_bbox.npy')\n mmcv.check_file_exist(box_file)\n return np.load(box_file)\n\n def get_unaligned_box_label(self, idx):\n box_file = osp.join(self.root_dir, 'scannet_instance_data',\n f'{idx}_unaligned_bbox.npy')\n mmcv.check_file_exist(box_file)\n return np.load(box_file)\n\n def get_axis_align_matrix(self, idx):\n matrix_file = osp.join(self.root_dir, 'scannet_instance_data',\n f'{idx}_axis_align_matrix.npy')\n mmcv.check_file_exist(matrix_file)\n return np.load(matrix_file)\n\n def get_images(self, idx):\n paths = []\n path = osp.join(self.root_dir, 'posed_images', idx)\n for file in sorted(os.listdir(path)):\n if file.endswith('.jpg'):\n paths.append(osp.join('posed_images', idx, file))\n return paths\n\n def get_extrinsics(self, idx):\n extrinsics = []\n path = osp.join(self.root_dir, 'posed_images', idx)\n for file in sorted(os.listdir(path)):\n if file.endswith('.txt') and not file == 'intrinsic.txt':\n extrinsics.append(np.loadtxt(osp.join(path, file)))\n return extrinsics\n\n def get_intrinsics(self, idx):\n matrix_file = 
osp.join(self.root_dir, 'posed_images', idx,\n 'intrinsic.txt')\n mmcv.check_file_exist(matrix_file)\n return np.loadtxt(matrix_file)\n\n def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):\n \"\"\"Get data infos.\n\n This method gets information from the raw data.\n\n Args:\n num_workers (int, optional): Number of threads to be used.\n Default: 4.\n has_label (bool, optional): Whether the data has label.\n Default: True.\n sample_id_list (list[int], optional): Index list of the sample.\n Default: None.\n\n Returns:\n infos (list[dict]): Information of the raw data.\n \"\"\"\n\n def process_single_scene(sample_idx):\n print(f'{self.split} sample_idx: {sample_idx}')\n info = dict()\n pc_info = {'num_features': 6, 'lidar_idx': sample_idx}\n info['point_cloud'] = pc_info\n pts_filename = osp.join(self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_vert.npy')\n points = np.load(pts_filename)\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))\n points.tofile(\n osp.join(self.root_dir, 'points', f'{sample_idx}.bin'))\n info['pts_path'] = osp.join('points', f'{sample_idx}.bin')\n\n # update with RGB image paths if exist\n if os.path.exists(osp.join(self.root_dir, 'posed_images')):\n info['intrinsics'] = self.get_intrinsics(sample_idx)\n all_extrinsics = self.get_extrinsics(sample_idx)\n all_img_paths = self.get_images(sample_idx)\n # some poses in ScanNet are invalid\n extrinsics, img_paths = [], []\n for extrinsic, img_path in zip(all_extrinsics, all_img_paths):\n if np.all(np.isfinite(extrinsic)):\n img_paths.append(img_path)\n extrinsics.append(extrinsic)\n info['extrinsics'] = extrinsics\n info['img_paths'] = img_paths\n\n if not self.test_mode:\n pts_instance_mask_path = osp.join(\n self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_ins_label.npy')\n pts_semantic_mask_path = osp.join(\n self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_sem_label.npy')\n\n pts_instance_mask = np.load(pts_instance_mask_path).astype(\n np.int64)\n pts_semantic_mask = np.load(pts_semantic_mask_path).astype(\n np.int64)\n\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))\n\n pts_instance_mask.tofile(\n osp.join(self.root_dir, 'instance_mask',\n f'{sample_idx}.bin'))\n pts_semantic_mask.tofile(\n osp.join(self.root_dir, 'semantic_mask',\n f'{sample_idx}.bin'))\n\n info['pts_instance_mask_path'] = osp.join(\n 'instance_mask', f'{sample_idx}.bin')\n info['pts_semantic_mask_path'] = osp.join(\n 'semantic_mask', f'{sample_idx}.bin')\n\n if has_label:\n annotations = {}\n # box is of shape [k, 6 + class]\n aligned_box_label = self.get_aligned_box_label(sample_idx)\n unaligned_box_label = self.get_unaligned_box_label(sample_idx)\n annotations['gt_num'] = aligned_box_label.shape[0]\n if annotations['gt_num'] != 0:\n aligned_box = aligned_box_label[:, :-1] # k, 6\n unaligned_box = unaligned_box_label[:, :-1]\n classes = aligned_box_label[:, -1] # k\n annotations['name'] = np.array([\n self.label2cat[self.cat_ids2class[classes[i]]]\n for i in range(annotations['gt_num'])\n ])\n # default names are given to aligned bbox for compatibility\n # we also save unaligned bbox info with marked names\n annotations['location'] = aligned_box[:, :3]\n annotations['dimensions'] = aligned_box[:, 3:6]\n annotations['gt_boxes_upright_depth'] = aligned_box\n annotations['unaligned_location'] = unaligned_box[:, :3]\n annotations['unaligned_dimensions'] = unaligned_box[:, 3:6]\n annotations[\n 
'unaligned_gt_boxes_upright_depth'] = unaligned_box\n annotations['index'] = np.arange(\n annotations['gt_num'], dtype=np.int32)\n annotations['class'] = np.array([\n self.cat_ids2class[classes[i]]\n for i in range(annotations['gt_num'])\n ])\n axis_align_matrix = self.get_axis_align_matrix(sample_idx)\n annotations['axis_align_matrix'] = axis_align_matrix # 4x4\n info['annos'] = annotations\n return info\n\n sample_id_list = sample_id_list if sample_id_list is not None \\\n else self.sample_id_list\n with futures.ThreadPoolExecutor(num_workers) as executor:\n infos = executor.map(process_single_scene, sample_id_list)\n return list(infos)\n\n\nclass ScanNetSegData(object):\n \"\"\"ScanNet dataset used to generate infos for semantic segmentation task.\n\n Args:\n data_root (str): Root path of the raw data.\n ann_file (str): The generated scannet infos.\n split (str, optional): Set split type of the data. Default: 'train'.\n num_points (int, optional): Number of points in each data input.\n Default: 8192.\n label_weight_func (function, optional): Function to compute the\n label weight. Default: None.\n \"\"\"\n\n def __init__(self,\n data_root,\n ann_file,\n split='train',\n num_points=8192,\n label_weight_func=None):\n self.data_root = data_root\n self.data_infos = mmcv.load(ann_file)\n self.split = split\n assert split in ['train', 'val', 'test']\n self.num_points = num_points\n\n self.all_ids = np.arange(41) # all possible ids\n self.cat_ids = np.array([\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36,\n 39\n ]) # used for seg task\n self.ignore_index = len(self.cat_ids)\n\n self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \\\n self.ignore_index\n for i, cat_id in enumerate(self.cat_ids):\n self.cat_id2class[cat_id] = i\n\n # label weighting function is taken from\n # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24\n self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \\\n label_weight_func is None else label_weight_func\n\n def get_seg_infos(self):\n if self.split == 'test':\n return\n scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()\n save_folder = osp.join(self.data_root, 'seg_info')\n mmcv.mkdir_or_exist(save_folder)\n np.save(\n osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),\n scene_idxs)\n np.save(\n osp.join(save_folder, f'{self.split}_label_weight.npy'),\n label_weight)\n print(f'{self.split} resampled scene index and label weight saved')\n\n def _convert_to_label(self, mask):\n \"\"\"Convert class_id in loaded segmentation mask to label.\"\"\"\n if isinstance(mask, str):\n if mask.endswith('npy'):\n mask = np.load(mask)\n else:\n mask = np.fromfile(mask, dtype=np.int64)\n label = self.cat_id2class[mask]\n return label\n\n def get_scene_idxs_and_label_weight(self):\n \"\"\"Compute scene_idxs for data sampling and label weight for loss\n calculation.\n\n We sample more times for scenes with more points. 
Label_weight is\n inversely proportional to number of class points.\n \"\"\"\n num_classes = len(self.cat_ids)\n num_point_all = []\n label_weight = np.zeros((num_classes + 1, )) # ignore_index\n for data_info in self.data_infos:\n label = self._convert_to_label(\n osp.join(self.data_root, data_info['pts_semantic_mask_path']))\n num_point_all.append(label.shape[0])\n class_count, _ = np.histogram(label, range(num_classes + 2))\n label_weight += class_count\n\n # repeat scene_idx for num_scene_point // num_sample_point times\n sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))\n num_iter = int(np.sum(num_point_all) / float(self.num_points))\n scene_idxs = []\n for idx in range(len(self.data_infos)):\n scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter)))\n scene_idxs = np.array(scene_idxs).astype(np.int32)\n\n # calculate label weight, adopted from PointNet++\n label_weight = label_weight[:-1].astype(np.float32)\n label_weight = label_weight / label_weight.sum()\n label_weight = self.label_weight_func(label_weight).astype(np.float32)\n\n return scene_idxs, label_weight\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport tempfile\nfrom os import path as osp\n\nimport mmcv\nimport numpy as np\nimport pyquaternion\nfrom nuscenes.utils.data_classes import Box as NuScenesBox\n\nfrom ..core import show_result\nfrom ..core.bbox import Box3DMode, Coord3DMode, LiDARInstance3DBoxes\nfrom .builder import DATASETS\nfrom .custom_3d import Custom3DDataset\nfrom .pipelines import Compose\n\n\[email protected]_module()\nclass NuScenesDataset(Custom3DDataset):\n r\"\"\"NuScenes Dataset.\n\n This class serves as the API for experiments on the NuScenes Dataset.\n\n Please refer to `NuScenes Dataset <https://www.nuscenes.org/download>`_\n for data downloading.\n\n Args:\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n data_root (str): Path of dataset root.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n load_interval (int, optional): Interval of loading the dataset. It is\n used to uniformly sample the dataset. Defaults to 1.\n with_velocity (bool, optional): Whether include velocity prediction\n into the experiments. Defaults to True.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n box_type_3d (str, optional): Type of 3D box of this dataset.\n Based on the `box_type_3d`, the dataset will encapsulate the box\n to its original format then converted them to `box_type_3d`.\n Defaults to 'LiDAR' in this dataset. 
Available options includes.\n - 'LiDAR': Box in LiDAR coordinates.\n - 'Depth': Box in depth coordinates, usually for indoor dataset.\n - 'Camera': Box in camera coordinates.\n filter_empty_gt (bool, optional): Whether to filter empty GT.\n Defaults to True.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n eval_version (bool, optional): Configuration version of evaluation.\n Defaults to 'detection_cvpr_2019'.\n use_valid_flag (bool, optional): Whether to use `use_valid_flag` key\n in the info file as mask to filter gt_boxes and gt_names.\n Defaults to False.\n \"\"\"\n NameMapping = {\n 'movable_object.barrier': 'barrier',\n 'vehicle.bicycle': 'bicycle',\n 'vehicle.bus.bendy': 'bus',\n 'vehicle.bus.rigid': 'bus',\n 'vehicle.car': 'car',\n 'vehicle.construction': 'construction_vehicle',\n 'vehicle.motorcycle': 'motorcycle',\n 'human.pedestrian.adult': 'pedestrian',\n 'human.pedestrian.child': 'pedestrian',\n 'human.pedestrian.construction_worker': 'pedestrian',\n 'human.pedestrian.police_officer': 'pedestrian',\n 'movable_object.trafficcone': 'traffic_cone',\n 'vehicle.trailer': 'trailer',\n 'vehicle.truck': 'truck'\n }\n DefaultAttribute = {\n 'car': 'vehicle.parked',\n 'pedestrian': 'pedestrian.moving',\n 'trailer': 'vehicle.parked',\n 'truck': 'vehicle.parked',\n 'bus': 'vehicle.moving',\n 'motorcycle': 'cycle.without_rider',\n 'construction_vehicle': 'vehicle.parked',\n 'bicycle': 'cycle.without_rider',\n 'barrier': '',\n 'traffic_cone': '',\n }\n AttrMapping = {\n 'cycle.with_rider': 0,\n 'cycle.without_rider': 1,\n 'pedestrian.moving': 2,\n 'pedestrian.standing': 3,\n 'pedestrian.sitting_lying_down': 4,\n 'vehicle.moving': 5,\n 'vehicle.parked': 6,\n 'vehicle.stopped': 7,\n }\n AttrMapping_rev = [\n 'cycle.with_rider',\n 'cycle.without_rider',\n 'pedestrian.moving',\n 'pedestrian.standing',\n 'pedestrian.sitting_lying_down',\n 'vehicle.moving',\n 'vehicle.parked',\n 'vehicle.stopped',\n ]\n # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa\n ErrNameMapping = {\n 'trans_err': 'mATE',\n 'scale_err': 'mASE',\n 'orient_err': 'mAOE',\n 'vel_err': 'mAVE',\n 'attr_err': 'mAAE'\n }\n CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',\n 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',\n 'barrier')\n\n def __init__(self,\n ann_file,\n pipeline=None,\n data_root=None,\n classes=None,\n load_interval=1,\n with_velocity=True,\n modality=None,\n box_type_3d='LiDAR',\n filter_empty_gt=True,\n test_mode=False,\n eval_version='detection_cvpr_2019',\n use_valid_flag=False):\n self.load_interval = load_interval\n self.use_valid_flag = use_valid_flag\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode)\n\n self.with_velocity = with_velocity\n self.eval_version = eval_version\n from nuscenes.eval.detection.config import config_factory\n self.eval_detection_configs = config_factory(self.eval_version)\n if self.modality is None:\n self.modality = dict(\n use_camera=False,\n use_lidar=True,\n use_radar=False,\n use_map=False,\n use_external=False,\n )\n\n def get_cat_ids(self, idx):\n \"\"\"Get category distribution of single scene.\n\n Args:\n idx (int): Index of the data_info.\n\n Returns:\n dict[list]: for each category, if the current scene\n contains such boxes, store a list containing 
idx,\n otherwise, store empty list.\n \"\"\"\n info = self.data_infos[idx]\n if self.use_valid_flag:\n mask = info['valid_flag']\n gt_names = set(info['gt_names'][mask])\n else:\n gt_names = set(info['gt_names'])\n\n cat_ids = []\n for name in gt_names:\n if name in self.CLASSES:\n cat_ids.append(self.cat2id[name])\n return cat_ids\n\n def load_annotations(self, ann_file):\n \"\"\"Load annotations from ann_file.\n\n Args:\n ann_file (str): Path of the annotation file.\n\n Returns:\n list[dict]: List of annotations sorted by timestamps.\n \"\"\"\n data = mmcv.load(ann_file, file_format='pkl')\n data_infos = list(sorted(data['infos'], key=lambda e: e['timestamp']))\n data_infos = data_infos[::self.load_interval]\n self.metadata = data['metadata']\n self.version = self.metadata['version']\n return data_infos\n\n def get_data_info(self, index):\n \"\"\"Get data info according to the given index.\n\n Args:\n index (int): Index of the sample data to get.\n\n Returns:\n dict: Data information that will be passed to the data\n preprocessing pipelines. It includes the following keys:\n\n - sample_idx (str): Sample index.\n - pts_filename (str): Filename of point clouds.\n - sweeps (list[dict]): Infos of sweeps.\n - timestamp (float): Sample timestamp.\n - img_filename (str, optional): Image filename.\n - lidar2img (list[np.ndarray], optional): Transformations\n from lidar to different cameras.\n - ann_info (dict): Annotation info.\n \"\"\"\n info = self.data_infos[index]\n # standard protocol modified from SECOND.Pytorch\n input_dict = dict(\n sample_idx=info['token'],\n pts_filename=info['lidar_path'],\n sweeps=info['sweeps'],\n timestamp=info['timestamp'] / 1e6,\n )\n\n if self.modality['use_camera']:\n image_paths = []\n lidar2img_rts = []\n for cam_type, cam_info in info['cams'].items():\n image_paths.append(cam_info['data_path'])\n # obtain lidar to image transformation matrix\n lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])\n lidar2cam_t = cam_info[\n 'sensor2lidar_translation'] @ lidar2cam_r.T\n lidar2cam_rt = np.eye(4)\n lidar2cam_rt[:3, :3] = lidar2cam_r.T\n lidar2cam_rt[3, :3] = -lidar2cam_t\n intrinsic = cam_info['cam_intrinsic']\n viewpad = np.eye(4)\n viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic\n lidar2img_rt = (viewpad @ lidar2cam_rt.T)\n lidar2img_rts.append(lidar2img_rt)\n\n input_dict.update(\n dict(\n img_filename=image_paths,\n lidar2img=lidar2img_rts,\n ))\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n\n return input_dict\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: Annotation information consists of the following keys:\n\n - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):\n 3D ground truth bboxes\n - gt_labels_3d (np.ndarray): Labels of ground truths.\n - gt_names (list[str]): Class names of ground truths.\n \"\"\"\n info = self.data_infos[index]\n # filter out bbox containing no points\n if self.use_valid_flag:\n mask = info['valid_flag']\n else:\n mask = info['num_lidar_pts'] > 0\n gt_bboxes_3d = info['gt_boxes'][mask]\n gt_names_3d = info['gt_names'][mask]\n gt_labels_3d = []\n for cat in gt_names_3d:\n if cat in self.CLASSES:\n gt_labels_3d.append(self.CLASSES.index(cat))\n else:\n gt_labels_3d.append(-1)\n gt_labels_3d = np.array(gt_labels_3d)\n\n if self.with_velocity:\n gt_velocity = info['gt_velocity'][mask]\n nan_mask = np.isnan(gt_velocity[:, 0])\n 
gt_velocity[nan_mask] = [0.0, 0.0]\n gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1)\n\n # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be\n # the same as KITTI (0.5, 0.5, 0)\n gt_bboxes_3d = LiDARInstance3DBoxes(\n gt_bboxes_3d,\n box_dim=gt_bboxes_3d.shape[-1],\n origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)\n\n anns_results = dict(\n gt_bboxes_3d=gt_bboxes_3d,\n gt_labels_3d=gt_labels_3d,\n gt_names=gt_names_3d)\n return anns_results\n\n def _format_bbox(self, results, jsonfile_prefix=None):\n \"\"\"Convert the results to the standard format.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n jsonfile_prefix (str): The prefix of the output jsonfile.\n You can specify the output directory/filename by\n modifying the jsonfile_prefix. Default: None.\n\n Returns:\n str: Path of the output json file.\n \"\"\"\n nusc_annos = {}\n mapped_class_names = self.CLASSES\n\n print('Start to convert detection format...')\n for sample_id, det in enumerate(mmcv.track_iter_progress(results)):\n annos = []\n boxes = output_to_nusc_box(det)\n sample_token = self.data_infos[sample_id]['token']\n boxes = lidar_nusc_box_to_global(self.data_infos[sample_id], boxes,\n mapped_class_names,\n self.eval_detection_configs,\n self.eval_version)\n for i, box in enumerate(boxes):\n name = mapped_class_names[box.label]\n if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2:\n if name in [\n 'car',\n 'construction_vehicle',\n 'bus',\n 'truck',\n 'trailer',\n ]:\n attr = 'vehicle.moving'\n elif name in ['bicycle', 'motorcycle']:\n attr = 'cycle.with_rider'\n else:\n attr = NuScenesDataset.DefaultAttribute[name]\n else:\n if name in ['pedestrian']:\n attr = 'pedestrian.standing'\n elif name in ['bus']:\n attr = 'vehicle.stopped'\n else:\n attr = NuScenesDataset.DefaultAttribute[name]\n\n nusc_anno = dict(\n sample_token=sample_token,\n translation=box.center.tolist(),\n size=box.wlh.tolist(),\n rotation=box.orientation.elements.tolist(),\n velocity=box.velocity[:2].tolist(),\n detection_name=name,\n detection_score=box.score,\n attribute_name=attr)\n annos.append(nusc_anno)\n nusc_annos[sample_token] = annos\n nusc_submissions = {\n 'meta': self.modality,\n 'results': nusc_annos,\n }\n\n mmcv.mkdir_or_exist(jsonfile_prefix)\n res_path = osp.join(jsonfile_prefix, 'results_nusc.json')\n print('Results writes to', res_path)\n mmcv.dump(nusc_submissions, res_path)\n return res_path\n\n def _evaluate_single(self,\n result_path,\n logger=None,\n metric='bbox',\n result_name='pts_bbox'):\n \"\"\"Evaluation for a single model in nuScenes protocol.\n\n Args:\n result_path (str): Path of the result file.\n logger (logging.Logger | str, optional): Logger used for printing\n related information during evaluation. 
Default: None.\n metric (str, optional): Metric name used for evaluation.\n Default: 'bbox'.\n result_name (str, optional): Result name in the metric prefix.\n Default: 'pts_bbox'.\n\n Returns:\n dict: Dictionary of evaluation details.\n \"\"\"\n from nuscenes import NuScenes\n from nuscenes.eval.detection.evaluate import NuScenesEval\n\n output_dir = osp.join(*osp.split(result_path)[:-1])\n nusc = NuScenes(\n version=self.version, dataroot=self.data_root, verbose=False)\n eval_set_map = {\n 'v1.0-mini': 'mini_val',\n 'v1.0-trainval': 'val',\n }\n nusc_eval = NuScenesEval(\n nusc,\n config=self.eval_detection_configs,\n result_path=result_path,\n eval_set=eval_set_map[self.version],\n output_dir=output_dir,\n verbose=False)\n nusc_eval.main(render_curves=False)\n\n # record metrics\n metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))\n detail = dict()\n metric_prefix = f'{result_name}_NuScenes'\n for name in self.CLASSES:\n for k, v in metrics['label_aps'][name].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val\n for k, v in metrics['label_tp_errors'][name].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}_{}'.format(metric_prefix, name, k)] = val\n for k, v in metrics['tp_errors'].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}'.format(metric_prefix,\n self.ErrNameMapping[k])] = val\n\n detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']\n detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']\n return detail\n\n def format_results(self, results, jsonfile_prefix=None):\n \"\"\"Format the results to json (standard format for COCO evaluation).\n\n Args:\n results (list[dict]): Testing results of the dataset.\n jsonfile_prefix (str): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n\n Returns:\n tuple: Returns (result_files, tmp_dir), where `result_files` is a\n dict containing the json filepaths, `tmp_dir` is the temporal\n directory created for saving json files when\n `jsonfile_prefix` is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n assert len(results) == len(self), (\n 'The length of results is not equal to the dataset len: {} != {}'.\n format(len(results), len(self)))\n\n if jsonfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n\n # currently the output prediction results could be in two formats\n # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)\n # 2. 
list of dict('pts_bbox' or 'img_bbox':\n # dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))\n # this is a workaround to enable evaluation of both formats on nuScenes\n # refer to https://github.com/open-mmlab/mmdetection3d/issues/449\n if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]):\n result_files = self._format_bbox(results, jsonfile_prefix)\n else:\n # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict\n result_files = dict()\n for name in results[0]:\n print(f'\\nFormating bboxes of {name}')\n results_ = [out[name] for out in results]\n tmp_file_ = osp.join(jsonfile_prefix, name)\n result_files.update(\n {name: self._format_bbox(results_, tmp_file_)})\n return result_files, tmp_dir\n\n def evaluate(self,\n results,\n metric='bbox',\n logger=None,\n jsonfile_prefix=None,\n result_names=['pts_bbox'],\n show=False,\n out_dir=None,\n pipeline=None):\n \"\"\"Evaluation in nuScenes protocol.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n metric (str | list[str], optional): Metrics to be evaluated.\n Default: 'bbox'.\n logger (logging.Logger | str, optional): Logger used for printing\n related information during evaluation. Default: None.\n jsonfile_prefix (str, optional): The prefix of json files including\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n show (bool, optional): Whether to visualize.\n Default: False.\n out_dir (str, optional): Path to save the visualization results.\n Default: None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict[str, float]: Results of each evaluation metric.\n \"\"\"\n result_files, tmp_dir = self.format_results(results, jsonfile_prefix)\n\n if isinstance(result_files, dict):\n results_dict = dict()\n for name in result_names:\n print('Evaluating bboxes of {}'.format(name))\n ret_dict = self._evaluate_single(result_files[name])\n results_dict.update(ret_dict)\n elif isinstance(result_files, str):\n results_dict = self._evaluate_single(result_files)\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n\n if show or out_dir:\n self.show(results, out_dir, show=show, pipeline=pipeline)\n return results_dict\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=5,\n use_dim=5,\n file_client_args=dict(backend='disk')),\n dict(\n type='LoadPointsFromMultiSweeps',\n sweeps_num=10,\n file_client_args=dict(backend='disk')),\n dict(\n type='DefaultFormatBundle3D',\n class_names=self.CLASSES,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=False, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Whether to visualize the results online.\n Default: False.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._get_pipeline(pipeline)\n for i, result in enumerate(results):\n if 'pts_bbox' in result.keys():\n result = result['pts_bbox']\n data_info = self.data_infos[i]\n pts_path = data_info['lidar_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points = self._extract_data(i, pipeline, 'points').numpy()\n # for now 
we convert points into depth mode\n points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR,\n Coord3DMode.DEPTH)\n inds = result['scores_3d'] > 0.1\n gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()\n show_gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,\n Box3DMode.DEPTH)\n pred_bboxes = result['boxes_3d'][inds].tensor.numpy()\n show_pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,\n Box3DMode.DEPTH)\n show_result(points, show_gt_bboxes, show_pred_bboxes, out_dir,\n file_name, show)\n\n\ndef output_to_nusc_box(detection):\n \"\"\"Convert the output to the box class in the nuScenes.\n\n Args:\n detection (dict): Detection results.\n\n - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.\n - scores_3d (torch.Tensor): Detection scores.\n - labels_3d (torch.Tensor): Predicted box labels.\n\n Returns:\n list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.\n \"\"\"\n box3d = detection['boxes_3d']\n scores = detection['scores_3d'].numpy()\n labels = detection['labels_3d'].numpy()\n\n box_gravity_center = box3d.gravity_center.numpy()\n box_dims = box3d.dims.numpy()\n box_yaw = box3d.yaw.numpy()\n\n # our LiDAR coordinate system -> nuScenes box coordinate system\n nus_box_dims = box_dims[:, [1, 0, 2]]\n\n box_list = []\n for i in range(len(box3d)):\n quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i])\n velocity = (*box3d.tensor[i, 7:9], 0.0)\n # velo_val = np.linalg.norm(box3d[i, 7:9])\n # velo_ori = box3d[i, 6]\n # velocity = (\n # velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0)\n box = NuScenesBox(\n box_gravity_center[i],\n nus_box_dims[i],\n quat,\n label=labels[i],\n score=scores[i],\n velocity=velocity)\n box_list.append(box)\n return box_list\n\n\ndef lidar_nusc_box_to_global(info,\n boxes,\n classes,\n eval_configs,\n eval_version='detection_cvpr_2019'):\n \"\"\"Convert the box from ego to global coordinate.\n\n Args:\n info (dict): Info for a specific sample data, including the\n calibration information.\n boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.\n classes (list[str]): Mapped classes in the evaluation.\n eval_configs (object): Evaluation configuration object.\n eval_version (str, optional): Evaluation version.\n Default: 'detection_cvpr_2019'\n\n Returns:\n list: List of standard NuScenesBoxes in the global\n coordinate.\n \"\"\"\n box_list = []\n for box in boxes:\n # Move box to ego vehicle coord system\n box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation']))\n box.translate(np.array(info['lidar2ego_translation']))\n # filter det in ego.\n cls_range_map = eval_configs.class_range\n radius = np.linalg.norm(box.center[:2], 2)\n det_range = cls_range_map[classes[box.label]]\n if radius > det_range:\n continue\n # Move box to global coord system\n box.rotate(pyquaternion.Quaternion(info['ego2global_rotation']))\n box.translate(np.array(info['ego2global_translation']))\n box_list.append(box)\n return box_list\n" ]
[ [ "torch.all", "numpy.allclose", "numpy.random.seed", "torch.tensor", "numpy.all", "torch.cuda.is_available", "torch.allclose", "numpy.array", "numpy.isclose" ], [ "numpy.random.seed", "torch.tensor" ], [ "torch.distributed.get_rank" ], [ "torch.stack" ], [ "torch.nn.ReLU" ], [ "numpy.log", "numpy.fromfile", "numpy.isfinite", "numpy.arange", "numpy.ones", "numpy.load", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.loadtxt" ], [ "numpy.sqrt", "numpy.isnan", "numpy.linalg.inv", "numpy.eye", "numpy.linalg.norm", "numpy.concatenate", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
millingermarkus/pypsa-eur
[ "2e39a21299036c0cec86fe4707de06a42ec15d62" ]
[ "scripts/build_load_data.py" ]
[ "# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors\n#\n# SPDX-License-Identifier: MIT\n\n\"\"\"\n\nThis rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file.\n\nRelevant Settings\n-----------------\n\n.. code:: yaml\n\n snapshots:\n\n load:\n interpolate_limit:\n time_shift_for_large_gaps:\n manual_adjustments:\n\n\n.. seealso::\n Documentation of the configuration file ``config.yaml`` at\n :ref:`load_cf`\n\nInputs\n------\n\n\nOutputs\n-------\n\n- ``resource/time_series_60min_singleindex_filtered.csv``:\n\n\n\"\"\"\n\nimport logging\nlogger = logging.getLogger(__name__)\nfrom _helpers import configure_logging\n\nimport pandas as pd\nimport numpy as np\nimport dateutil\nfrom pandas import Timedelta as Delta\n\n\ndef load_timeseries(fn, years, countries, powerstatistics=True):\n \"\"\"\n Read load data from OPSD time-series package version 2020-10-06.\n\n Parameters\n ----------\n years : None or slice()\n Years for which to read load data (defaults to\n slice(\"2018\",\"2019\"))\n fn : str\n File name or url location (file format .csv)\n countries : listlike\n Countries for which to read load data.\n powerstatistics: bool\n Whether the electricity consumption data of the ENTSOE power\n statistics (if true) or of the ENTSOE transparency map (if false)\n should be parsed.\n\n Returns\n -------\n load : pd.DataFrame\n Load time-series with UTC timestamps x ISO-2 countries\n \"\"\"\n logger.info(f\"Retrieving load data from '{fn}'.\")\n\n pattern = 'power_statistics' if powerstatistics else '_transparency'\n pattern = f'_load_actual_entsoe_{pattern}'\n rename = lambda s: s[:-len(pattern)]\n date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)\n return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)\n .filter(like=pattern)\n .rename(columns=rename)\n .dropna(how=\"all\", axis=0)\n .rename(columns={'GB_UKM' : 'GB'})\n .filter(items=countries)\n .loc[years])\n\n\ndef consecutive_nans(ds):\n return (ds.isnull().astype(int)\n .groupby(ds.notnull().astype(int).cumsum()[ds.isnull()])\n .transform('sum').fillna(0))\n\n\ndef fill_large_gaps(ds, shift):\n \"\"\"\n Fill up large gaps with load data from the previous week.\n\n This function fills gaps ragning from 3 to 168 hours (one week).\n \"\"\"\n shift = Delta(shift)\n nhours = shift / np.timedelta64(1, 'h')\n if (consecutive_nans(ds) > nhours).any():\n logger.warning('There exist gaps larger then the time shift used for '\n 'copying time slices.')\n time_shift = pd.Series(ds.values, ds.index + shift)\n return ds.where(ds.notnull(), time_shift.reindex_like(ds))\n\n\ndef nan_statistics(df):\n def max_consecutive_nans(ds):\n return (ds.isnull().astype(int)\n .groupby(ds.notnull().astype(int).cumsum())\n .sum().max())\n consecutive = df.apply(max_consecutive_nans)\n total = df.isnull().sum()\n max_total_per_month = df.isnull().resample('m').sum().max()\n return pd.concat([total, consecutive, max_total_per_month],\n keys=['total', 'consecutive', 'max_total_per_month'], axis=1)\n\n\ndef copy_timeslice(load, cntry, start, stop, delta):\n start = pd.Timestamp(start)\n stop = pd.Timestamp(stop)\n if start-delta in load.index and stop in 
load.index and cntry in load:\n load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values\n\n\ndef manual_adjustment(load, powerstatistics):\n \"\"\"\n Adjust gaps manual for load data from OPSD time-series package.\n\n 1. For the ENTSOE power statistics load data (if powerstatistics is True)\n\n Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the\n same load curve as Serbia and Albania the same as Macdedonia, both scaled\n by the corresponding ratio of total energy consumptions reported by\n IEA Data browser [0] for the year 2013.\n\n 2. For the ENTSOE transparency load data (if powerstatistics is False)\n\n Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the\n same load curve as Montenegro, scaled by the corresponding ratio of total energy\n consumptions reported by IEA Data browser [0] for the year 2016.\n\n [0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons\n\n\n Parameters\n ----------\n load : pd.DataFrame\n Load time-series with UTC timestamps x ISO-2 countries\n powerstatistics: bool\n Whether argument load comprises the electricity consumption data of\n the ENTSOE power statistics or of the ENTSOE transparency map\n\n Returns\n -------\n load : pd.DataFrame\n Manual adjusted and interpolated load time-series with UTC\n timestamps x ISO-2 countries\n \"\"\"\n\n if powerstatistics:\n if 'MK' in load.columns:\n if 'AL' not in load.columns or load.AL.isnull().values.all():\n load['AL'] = load['MK'] * (4.1 / 7.4)\n if 'RS' in load.columns:\n if 'KV' not in load.columns or load.KV.isnull().values.all():\n load['KV'] = load['RS'] * (4.8 / 27.)\n\n copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1))\n copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2))\n copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1))\n copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1))\n # is a WE, so take WE before\n copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1))\n copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1))\n copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1))\n # whole january missing\n copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364))\n\n else:\n if 'ME' in load:\n if 'AL' not in load and 'AL' in countries:\n load['AL'] = load.ME * (5.7/2.9)\n if 'MK' not in load and 'MK' in countries:\n load['MK'] = load.ME * (6.7/2.9)\n copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1))\n\n return load\n\n\nif __name__ == \"__main__\":\n\n if 'snakemake' not in globals():\n from _helpers import mock_snakemake\n snakemake = mock_snakemake('build_load_data')\n\n configure_logging(snakemake)\n\n config = snakemake.config\n powerstatistics = config['load']['power_statistics']\n interpolate_limit = config['load']['interpolate_limit']\n countries = config['countries']\n snapshots = pd.date_range(freq='h', **config['snapshots'])\n years = slice(snapshots[0], snapshots[-1])\n time_shift = config['load']['time_shift_for_large_gaps']\n\n load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)\n\n if config['load']['manual_adjustments']:\n load = manual_adjustment(load, powerstatistics)\n\n logger.info(f\"Linearly interpolate gaps of size {interpolate_limit} and less.\")\n load = 
load.interpolate(method='linear', limit=interpolate_limit)\n\n logger.info(\"Filling larger gaps by copying time-slices of period \"\n f\"'{time_shift}'.\")\n load = load.apply(fill_large_gaps, shift=time_shift)\n\n assert not load.isna().any().any(), (\n 'Load data contains nans. Adjust the parameters '\n '`time_shift_for_large_gaps` or modify the `manual_adjustment` function '\n 'for implementing the needed load data modifications.')\n\n load.to_csv(snakemake.output[0])\n\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.Series", "pandas.Timedelta", "numpy.timedelta64", "pandas.date_range", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
daiki-kimura/commonsense-rl
[ "5513926957b6501ce9cfa46f77f8f2c1c4892fa5", "5513926957b6501ce9cfa46f77f8f2c1c4892fa5" ]
[ "utils_twc/kg.py", "train_agent.py" ]
[ "import sys\nimport networkx as nx\nimport logging\nimport json\nimport requests\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom utils_twc.generic import escape_entities\n\n# Logging formatting\nFORMAT = '%(asctime)s %(message)s'\nlogging.basicConfig(format=FORMAT, level='INFO', stream=sys.stdout)\nkg = {}\nsource_paths= defaultdict(dict)\n\n\ndef shortest_path_subgraph(kg_graph, prev_graph, nodes, inventory_entities=None, command_entities=None, path_len=2, add_all_path=False):\n if inventory_entities is None:\n inventory_entities = []\n if command_entities is None:\n command_entities = []\n # Get non-neighbor nodes: nodes without edges between them\n world_graph = kg_graph.subgraph(list(prev_graph.nodes)+nodes).copy()\n world_graph = nx.compose(prev_graph,world_graph)\n world_graph.remove_edges_from(nx.selfloop_edges(world_graph))\n\n if path_len < 2:\n return world_graph\n triplets = []\n # Add command related relations\n pruned_entities = list(set(command_entities)-set(inventory_entities))\n if pruned_entities:\n for src_et in inventory_entities:\n for tgt_et in pruned_entities:\n if src_et != tgt_et:\n try:\n pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)\n except nx.NetworkXNoPath:\n pair_dist = 0\n if pair_dist >= 1 and pair_dist <= path_len:\n triplets.append([src_et, tgt_et, 'relatedTo'])\n else: # no items in the pruned entities, won't happen\n for entities in command_entities:\n for src_et in entities:\n for tgt_et in entities:\n if src_et != tgt_et:\n try:\n pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)\n except nx.NetworkXNoPath:\n pair_dist=0\n if pair_dist >= 1 and pair_dist <= path_len:\n triplets.append([src_et, tgt_et, 'relatedTo'])\n world_graph, _= add_triplets_to_graph(world_graph, triplets)\n return world_graph\n\n\ndef construct_graph(triplets):\n graph = nx.DiGraph()\n entities = {}\n for [e1, e2, r] in triplets:\n e1 = e1.lower().strip()\n e2 = e2.lower().strip()\n r = r.lower().strip()\n if e1 not in entities:\n graph.add_node(e1)\n entities[e1] = e1\n if e2 not in entities:\n graph.add_node(e2)\n entities[e2] = e2\n # Add Edge information\n if graph.has_edge(e1, e2):\n if r not in graph.edges[e1, e2]['relation']:\n graph.edges[e1, e2]['relation'] += ' ' + r\n else:\n graph.add_edge(e1, e2, relation=r)\n return graph, entities\n\n\ndef add_triplets_to_graph(graph, triplets):\n entities = dict(graph.nodes.data())\n for [e1, e2, r] in triplets:\n e1 = e1.lower().strip()\n e2 = e2.lower().strip()\n r = r.lower().strip()\n if e1 not in entities:\n graph.add_node(e1)\n entities[e1] = e1\n if e2 not in entities:\n graph.add_node(e2)\n entities[e2] = e2\n # Add Edge information\n if graph.has_edge(e1, e2):\n if r not in graph.edges[e1, e2]['relation']:\n graph.edges[e1, e2]['relation'] += ' ' + r\n else:\n graph.add_edge(e1, e2, relation=r)\n return graph, entities\n\n\ndef draw_graph(graph, title=\"cleanup\", show_relation=True, weights=None, pos=None):\n if not pos:\n pos = nx.spring_layout(graph, k=0.95)\n if weights:\n nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color=weights.tolist(),\n vmin=np.min(weights), vmax=np.max(weights), node_shape='o', alpha=0.9, font_size=8, with_labels=True,\n label=title,cmap='Blues')\n else:\n nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color='pink',\n node_shape='o', alpha=0.9, font_size=8, with_labels=True, label=title)\n 
if show_relation:\n p_edge = nx.draw_networkx_edge_labels(graph, pos, font_size=6, font_color='red',\n edge_labels=nx.get_edge_attributes(graph, 'relation'))\n\n\ndef draw_graph_colormap(graph,node_weights, showbar=False, cmap='YlGnBu'):\n # node_weights: maps node id/name to attention weights\n pos = nx.spring_layout(graph, k=0.95)\n weights = []\n for node in graph.nodes:\n weights.append(node_weights[node])\n # cmap = plt.cm.YlGnBu#RdBu\n cmap = plt.get_cmap(cmap)\n vmin = np.min(weights)\n vmax = np.max(weights)\n nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000,\n node_color=weights, vmin=vmin, vmax=vmax, cmap=cmap,\n node_shape='o', alpha=0.9, font_size=8, with_labels=True, label='Attention')\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))\n sm._A = []\n if showbar:\n plt.colorbar(sm)\n plt.show()\n\n\ndef construct_kg(filename: str, print_every=1e6, cache_load=True, logger=logging.getLogger(__name__)) -> (nx.DiGraph, list, set):\n # access edges with graph.edges.data('relation')\n if 'graph' in kg and cache_load:\n return kg['graph'], kg['triplets'], kg['entities']\n\n path = Path(filename)\n if not path.exists():\n filename = './kg/conceptnet/kg.txt'\n\n triplets = []\n with open(filename, 'r') as fp:\n for idx, line in enumerate(fp):\n e1, r, e2 = line.rstrip(\"\\n\").rsplit()\n triplets.append([e1.lower().strip(), e2.lower().strip(), r.lower().strip()])\n if idx % print_every == 0:\n print(\"*\",end='')\n [graph, entities] = construct_graph(triplets)\n graph = graph.to_undirected(as_view=True) # Since a->b ==> b->a\n if cache_load:\n kg['graph'] = graph\n kg['triplets'] = triplets\n kg['entities'] = entities\n return graph, triplets, entities\n\n\nclass RelationExtractor:\n def __init__(self, tokenizer, openie_url=\"http://localhost:9000/\"):\n \"\"\"\n :param tokenizer:\n :param openie_url: server url for Stanford Core NLPOpen IE\n \"\"\"\n self.tokenizer = tokenizer\n self.openie_url = openie_url\n self.kg_vocab = {}\n self.agent_loc = ''\n\n def call_stanford_openie(self,sentence):\n querystring = {\n \"properties\": \"%7B%22annotators%22%3A%20%22openie%22%7D\",\n \"pipelineLanguage\": \"en\"}\n response = requests.request(\"POST\", self.openie_url, data=sentence, params=querystring)\n response = json.JSONDecoder().decode(response.text)\n return response\n\n def fetch_triplets(self,text, current_graph, prev_action=None):\n triplets = []\n remove = []\n prev_remove = []\n link = []\n c_id = len(self.kg_vocab.keys())\n obs = self.tokenizer.clean_string(text, preprocess=True)\n dirs = ['north', 'south', 'east', 'west']\n obs = str(obs)\n doc = self.tokenizer.nlp_eval(obs)\n sents = {}\n try:\n sents = self.call_stanford_openie(doc.text)['sentences']\n except:\n print(\"Error in connecting to Stanford CoreNLP OpenIE Server\")\n for ov in sents:\n tokens = ov[\"tokens\"]\n triple = ov['openie']\n for tr in triple:\n h, r, t = tr['subject'].lower(), tr['relation'].lower(), tr['object'].lower()\n if h == 'we':\n h = 'you'\n if r == 'are in':\n r = \"'ve entered\"\n\n if h == 'it':\n break\n triplets.append((h, r, t))\n\n room = \"\"\n room_set = False\n for rule in triplets:\n h, r, t = rule\n if 'entered' in r or 'are in' in r or 'walked' in r:\n prev_remove.append(r)\n if not room_set:\n room = t\n room_set = True\n if 'should' in r:\n prev_remove.append(r)\n if 'see' in r or 'make out' in r:\n link.append((r, t))\n remove.append(r)\n # else:\n # link.append((r, t))\n\n prev_room = self.agent_loc\n self.agent_loc = 
room\n add_rules = []\n if prev_action is not None:\n for d in dirs:\n if d in prev_action and room != \"\":\n add_rules.append((prev_room, d + ' of', room))\n prev_room_subgraph = None\n prev_you_subgraph = None\n\n for sent in doc.sents:\n sent = sent.text\n if sent == ',' or sent == 'hm .':\n continue\n if 'exit' in sent or 'entranceway' in sent:\n for d in dirs:\n if d in sent:\n triplets.append((room, 'has', 'exit to ' + d))\n if prev_room != \"\":\n graph_copy = current_graph.copy()\n graph_copy.remove_edge('you', prev_room)\n con_cs = [graph_copy.subgraph(c) for c in nx.weakly_connected_components(graph_copy)]\n\n for con_c in con_cs:\n if prev_room in con_c.nodes:\n prev_room_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)\n if 'you' in con_c.nodes:\n prev_you_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)\n\n for l in link:\n add_rules.append((room, l[0], l[1]))\n\n for rule in triplets:\n h, r, t = rule\n if r == 'is in':\n if t == 'room':\n t = room\n if r not in remove:\n add_rules.append((h, r, t))\n edges = list(current_graph.edges)\n for edge in edges:\n r = 'relatedTo'\n if 'relation' in current_graph[edge[0]][edge[1]]:\n r = current_graph[edge[0]][edge[1]]['relation']\n if r in prev_remove:\n current_graph.remove_edge(*edge)\n\n if prev_you_subgraph is not None:\n current_graph.remove_edges_from(prev_you_subgraph.edges)\n\n for rule in add_rules:\n u = '_'.join(str(rule[0]).split())\n v = '_'.join(str(rule[2]).split())\n if u != 'it' and u not in self.kg_vocab:\n self.kg_vocab[u] = c_id\n c_id += 1\n if v != 'it' and v not in self.kg_vocab:\n self.kg_vocab[v] = c_id\n c_id += 1\n skip_flag = False\n for skip_token in self.tokenizer.ignore_list:\n if skip_token in u or skip_token in v:\n skip_flag = True\n if u != 'it' and v != 'it' and not skip_flag:\n r = str(rule[1]).lower()\n if not rule[1] or rule[1] == '':\n r = 'relatedTo'\n current_graph.add_edge(str(rule[0]).lower(), str(rule[2]).lower(), relation=r)\n prev_edges = current_graph.edges\n if prev_room_subgraph is not None:\n current_graph.add_edges_from(prev_room_subgraph.edges)\n current_edges = current_graph.edges\n return current_graph, add_rules\n\n\ndef khop_neighbor_graph(graph, entities, cutoff=1, max_khop_degree=None):\n all_entities = []\n for et in entities:\n candidates = nx.single_source_shortest_path(graph, et, cutoff=cutoff).keys()\n if not max_khop_degree or len(candidates)<=max_khop_degree:\n all_entities.extend(list(candidates))\n return graph.subgraph(set(entities)|set(all_entities))\n\n\ndef ego_graph_seed_expansion(graph, seed, radius, undirected=True, max_degree=None):\n working_graph = graph\n if undirected:\n working_graph = graph.to_undirected()\n marked = set(seed)\n nodes = set(seed)\n\n for _ in range(radius):\n border = set()\n for node in marked:\n neighbors = {n for n in working_graph[node]}\n if max_degree is None or len(neighbors) <= max_degree:\n border |= neighbors\n nodes |= border\n marked = border\n\n return graph.subgraph(nodes)\n\n\ndef shortest_path_seed_expansion(graph, seed, cutoff=None, undirected=True, keep_all=True):\n nodes = set(seed)\n seed = list(seed)\n\n working_graph = graph\n if undirected:\n working_graph = graph.to_undirected()\n for i in range(len(seed)):\n start = i + 1 if undirected else 0\n for j in range(start, len(seed)):\n try:\n if not keep_all:\n path = nx.shortest_path(working_graph, seed[i], seed[j])\n if cutoff is None or len(path) <= cutoff:\n nodes |= set(path)\n else:\n paths = nx.all_shortest_paths(working_graph, seed[i], 
seed[j])\n for p in paths:\n if cutoff is None or len(p) <= cutoff:\n nodes |= set(p)\n except nx.NetworkXNoPath:\n continue\n return graph.subgraph(nodes)\n\n\ndef load_manual_graphs(path):\n path = Path(path)\n manual_world_graphs = {}\n if not path.exists():\n print('None Found.')\n return manual_world_graphs\n\n files = path.rglob(\"conceptnet_manual_subgraph-*.tsv\")\n for file in files:\n game_id = str(file).split('-')[-1].split('.')[0]\n graph, triplets, entities = construct_kg(file, cache_load=False)\n manual_world_graphs[game_id]={}\n manual_world_graphs[game_id]['graph'] = graph\n manual_world_graphs[game_id]['triplets'] = triplets\n manual_world_graphs[game_id]['entities'] = entities\n print(' DONE')\n return manual_world_graphs\n\n\n\n\ndef kg_match(extractor, target_entities, kg_entities):\n result = set()\n kg_entities = escape_entities(kg_entities)\n for e in target_entities:\n e = e.lower().strip()\n result |= extractor(e, kg_entities)\n return result\n\n\ndef save_graph_tsv(graph, path):\n relation_map = nx.get_edge_attributes(graph, 'relation')\n lines = []\n for n1, n2 in graph.edges:\n relations = relation_map[n1, n2].split()\n for r in relations:\n lines.append(f'{n1}\\t{r}\\t{n2}\\n')\n with open(path, 'w') as f:\n f.writelines(lines)\n\n\nif __name__ == '__main__':\n from utils_twc import extractor\n from utils_twc.nlp import Tokenizer\n\n tk_extractor = extractor.get_extractor('max')\n tokenizer = Tokenizer(extractor=tk_extractor)\n rel_extract = RelationExtractor(tokenizer,openie_url='http://iqa962.sl.cloud9.ibm.com:9000/')\n # text = 'On the table, you see an apple, a hat, a key and an umbrella. '\n text = \"You've just walked into a Living Room. You try to gain information on your \" \\\n \"surroundings by using a technique you call looking. You can see a closet. \" \\\n \"You idly wonder how they came up with the name TextWorld for this place. \" \\\n \"It's pretty fitting. A closed standard looking antique trunk is in the room. \" \\\n \"You can see a table. The table is usual. On the table you see an apple, a mug, \" \\\n \"a newspaper, a note, a hat and a pencil. You smell a sickening smell, and follow \" \\\n \"it to a couch. The couch is standard. But the thing is empty. Hm. Oh well You see a \" \\\n \"gleam over in a corner, where you can see a tv stand. The tv stand is ordinary. \" \\\n \"On the tv stand you can make out a tv. You don't like doors? Why not try going east, \" \\\n \"that entranceway is unguarded. 
You are carrying nothing.\"\n sents = text\n # clauses = clausie.clausie(text)\n # propositions = clausie.extract_propositions(clauses)\n # sents = ''\n # for prop in propositions:\n # sent = clausie.proposition_text_str(prop)\n # sents += sent\n # print(sent)\n graph, add_rules = rel_extract.fetch_triplets(sents, nx.DiGraph())\n print(add_rules)\n\n", "import argparse\nimport numpy as np\nimport random\nfrom time import time\nimport torch\nimport pickle\nimport agent\nfrom utils_twc import extractor\nfrom utils_twc.generic import getUniqueFileHandler\nfrom utils_twc.kg import construct_kg, load_manual_graphs, RelationExtractor\nfrom utils_twc.textworld_utils import get_goal_graph\nfrom utils_twc.nlp import Tokenizer\nfrom games import dataset\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef play(agent, opt, random_action=False):\n filter_examine_cmd = False\n infos_to_request = agent.infos_to_request\n infos_to_request.max_score = True # Needed to normalize the scores.\n game_path = opt.game_dir + \"/\" + (\n str(opt.difficulty_level) + \"/\" + opt.mode if opt.difficulty_level != '' else opt.game_dir + \"/\" + opt.mode )\n manual_world_graphs = {}\n if opt.graph_emb_type and 'world' in opt.graph_type:\n print(\"Loading Knowledge Graph ... \", end='')\n agent.kg_graph, _, _= construct_kg(game_path + '/conceptnet_subgraph.txt')\n print(' DONE')\n # optional: Use complete or brief manually extracted conceptnet subgraph for the agent\n print(\"Loading Manual World Graphs ... \", end='')\n manual_world_graphs = load_manual_graphs(game_path + '/manual_subgraph_brief')\n\n if opt.game_name:\n game_path = game_path + \"/\"+ opt.game_name\n\n env, game_file_names = dataset.get_game_env(game_path, infos_to_request, opt.max_step_per_episode, opt.batch_size,\n opt.mode, opt.verbose)\n # Get Goals as graphs\n goal_graphs = {}\n for game_file in env.gamefiles:\n goal_graph = get_goal_graph(game_file)\n if goal_graph:\n game_id = game_file.split('-')[-1].split('.')[0]\n goal_graphs[game_id] = goal_graph\n\n # Collect some statistics: nb_steps, final reward.\n total_games_count = len(game_file_names)\n game_identifiers, avg_moves, avg_scores, avg_norm_scores, max_poss_scores = [], [], [], [], []\n\n for no_episode in (range(opt.nepisodes)):\n if not random_action:\n random.seed(no_episode)\n np.random.seed(no_episode)\n torch.manual_seed(no_episode)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(no_episode)\n env.seed(no_episode)\n\n agent.start_episode(opt.batch_size)\n avg_eps_moves, avg_eps_scores, avg_eps_norm_scores = [], [], []\n num_games = total_games_count\n game_max_scores = []\n game_names = []\n while num_games > 0:\n obs, infos = env.reset() # Start new episode.\n if filter_examine_cmd:\n for commands_ in infos[\"admissible_commands\"]: # [open refri, take apple from refrigeration]\n for cmd_ in [cmd for cmd in commands_ if cmd.split()[0] in [\"examine\", \"look\"]]:\n commands_.remove(cmd_)\n\n batch_size = len(obs)\n num_games -= len(obs)\n game_goal_graphs = [None] * batch_size\n max_scores = []\n game_ids = []\n game_manual_world_graph = [None] * batch_size\n for b, game in enumerate(infos[\"game\"]):\n max_scores.append(game.max_score)\n if \"uuid\" in game.metadata:\n game_id = game.metadata[\"uuid\"].split(\"-\")[-1]\n game_ids.append(game_id)\n game_names.append(game_id)\n game_max_scores.append(game.max_score)\n if len(goal_graphs):\n game_goal_graphs[b] = goal_graphs[game_id]\n if len(manual_world_graphs):\n 
game_manual_world_graph[b] = manual_world_graphs[game_id]\n\n if not game_ids:\n game_ids = range(num_games,num_games+batch_size)\n game_names.extend(game_ids)\n\n commands = [\"restart\"]*len(obs)\n scored_commands = [[] for b in range(batch_size)]\n last_scores = [0.0]*len(obs)\n scores = [0.0]*len(obs)\n dones = [False]*len(obs)\n nb_moves = [0]*len(obs)\n infos[\"goal_graph\"] = game_goal_graphs\n infos[\"manual_world_graph\"] = game_manual_world_graph\n agent.reset_parameters(opt.batch_size)\n for step_no in range(opt.max_step_per_episode):\n nb_moves = [step + int(not done) for step, done in zip(nb_moves, dones)]\n\n if agent.graph_emb_type and ('local' in agent.graph_type or 'world' in agent.graph_type):\n # prune_nodes = opt.prune_nodes if no_episode >= opt.prune_episode and no_episode % 25 ==0 and step_no % 10 == 0 else False\n prune_nodes = opt.prune_nodes\n agent.update_current_graph(obs, commands, scored_commands, infos, opt.graph_mode, prune_nodes)\n\n commands = agent.act(obs, scores, dones, infos, scored_commands, random_action)\n obs, scores, dones, infos = env.step(commands)\n infos[\"goal_graph\"] = game_goal_graphs\n infos[\"manual_world_graph\"] = game_manual_world_graph\n\n for b in range(batch_size):\n if scores[b] - last_scores[b] > 0:\n last_scores[b] = scores[b]\n scored_commands[b].append(commands[b])\n\n if all(dones):\n break\n if step_no == opt.max_step_per_episode - 1:\n dones = [True for _ in dones]\n agent.act(obs, scores, dones, infos, scored_commands, random_action) # Let the agent know the game is done.\n\n if opt.verbose:\n print(\".\", end=\"\")\n avg_eps_moves.extend(nb_moves)\n avg_eps_scores.extend(scores)\n avg_eps_norm_scores.extend([score/max_score for score, max_score in zip(scores, max_scores)])\n if opt.verbose:\n print(\"*\", end=\"\")\n agent.end_episode()\n game_identifiers.append(game_names)\n avg_moves.append(avg_eps_moves) # episode x # games\n avg_scores.append(avg_eps_scores)\n avg_norm_scores.append(avg_eps_norm_scores)\n max_poss_scores.append(game_max_scores)\n env.close()\n game_identifiers = np.array(game_identifiers)\n avg_moves = np.array(avg_moves)\n avg_scores = np.array(avg_scores)\n avg_norm_scores = np.array(avg_norm_scores)\n max_poss_scores = np.array(max_poss_scores)\n if opt.verbose:\n idx = np.apply_along_axis(np.argsort, axis=1, arr=game_identifiers)\n game_avg_moves = np.mean(np.array(list(map(lambda x, y: y[x], idx, avg_moves))), axis=0)\n game_norm_scores = np.mean(np.array(list(map(lambda x, y: y[x], idx, avg_norm_scores))), axis=0)\n game_avg_scores = np.mean(np.array(list(map(lambda x, y: y[x], idx, avg_scores))), axis=0)\n\n msg = \"\\nGame Stats:\\n-----------\\n\" + \"\\n\".join(\n \" Game_#{} = Score: {:5.2f} Norm_Score: {:5.2f} Moves: {:5.2f}/{}\".format(game_no,avg_score,\n norm_score, avg_move,\n opt.max_step_per_episode)\n for game_no, (norm_score, avg_score, avg_move) in\n enumerate(zip(game_norm_scores, game_avg_scores, game_avg_moves)))\n\n print(msg)\n\n total_avg_moves = np.mean(game_avg_moves)\n total_avg_scores = np.mean(game_avg_scores)\n total_norm_scores = np.mean(game_norm_scores)\n msg = opt.mode+\" stats: avg. score: {:4.2f}; norm. avg. score: {:4.2f}; avg. 
steps: {:5.2f}; \\n\"\n print(msg.format(total_avg_scores, total_norm_scores,total_avg_moves))\n\n ## Dump log files ......\n str_result = {opt.mode + 'game_ids': game_identifiers, opt.mode + 'max_scores': max_poss_scores,\n opt.mode + 'scores_runs': avg_scores, opt.mode + 'norm_score_runs': avg_norm_scores,\n opt.mode + 'moves_runs': avg_moves}\n\n results_ofile = getUniqueFileHandler(opt.results_filename + '_' +opt.mode+'_results')\n pickle.dump(str_result, results_ofile)\n return avg_scores, avg_norm_scores, avg_moves\n\n\nif __name__ == '__main__':\n random.seed(42)\n parser = argparse.ArgumentParser(add_help=False)\n\n # game files and other directories\n parser.add_argument('--game_dir', default='./games/twc', help='Location of the game e.g ./games/testbed')\n parser.add_argument('--game_name', help='Name of the game file e.g., kitchen_cleanup_10quest_1.ulx, *.ulx, *.z8')\n parser.add_argument('--results_dir', default='./results', help='Path to the results files')\n parser.add_argument('--logs_dir', default='./logs', help='Path to the logs files')\n\n # optional arguments (if game_name is given) for game files\n parser.add_argument('--batch_size', type=int, default='1', help='Number of the games per batch')\n parser.add_argument('--difficulty_level', default='easy', choices=['easy','medium', 'hard'],\n help='difficulty level of the games')\n\n # Experiments\n parser.add_argument('--initial_seed', type=int, default=42)\n parser.add_argument('--nruns', type=int, default=5)\n parser.add_argument('--runid', type=int, default=0)\n parser.add_argument('--no_train_episodes', type=int, default=100)\n parser.add_argument('--no_eval_episodes', type=int, default=5)\n parser.add_argument('--train_max_step_per_episode', type=int, default=50)\n parser.add_argument('--eval_max_step_per_episode', type=int, default=50)\n parser.add_argument('--verbose', action='store_true', default=True)\n\n parser.add_argument('--hidden_size', type=int, default=300, help='num of hidden units for embeddings')\n parser.add_argument('--hist_scmds_size', type=int, default=3,\n help='Number of recent scored command history to use. 
Useful when the game has intermediate reward.')\n parser.add_argument('--ngram', type=int, default=3)\n parser.add_argument('--token_extractor', default='max', help='token extractor: (any or max)')\n parser.add_argument('--corenlp_url', default='http://localhost:9000/',\n help='URL for Stanford CoreNLP OpenIE Server for the relation extraction for the local graph')\n\n parser.add_argument('--noun_only_tokens', action='store_true', default=False,\n help=' Allow only noun for the token extractor')\n parser.add_argument('--use_stopword', action='store_true', default=False,\n help=' Use stopwords for the token extractor')\n parser.add_argument('--agent_type', default='knowledgeaware', choices=['random','simple', 'knowledgeaware'],\n help='Agent type for the text world: (random, simple, knowledgeable)')\n parser.add_argument('--graph_type', default='', choices=['', 'local','world'],\n help='What type of graphs to be generated')\n parser.add_argument('--graph_mode', default='evolve', choices=['full', 'evolve'],\n help='Give Full ground truth graph or evolving knowledge graph: (full, evolve)')\n parser.add_argument('--local_evolve_type', default='direct', choices=['direct', 'ground'],\n help='Type of the generated/evolving strategy for local graph')\n parser.add_argument('--world_evolve_type', default='cdc',\n choices=['DC','CDC', 'NG','NG+prune','manual'],\n help='Type of the generated/evolving strategy for world graph')\n parser.add_argument('--prune_nodes', action='store_true', default=False,\n help=' Allow pruning of low-probability nodes in the world-graph')\n parser.add_argument('--prune_start_episode', type=int, default=1, help='Starting the pruning from this episode')\n\n # Embeddings\n parser.add_argument('--emb_loc', default='embeddings/', help='Path to the embedding location')\n parser.add_argument('--word_emb_type', default='glove',\n help='Embedding type for the observation and the actions: ...'\n '(random, glove, numberbatch, fasttext). 
Use utils_twc.generic.load_embedings ...'\n ' to take car of the custom embedding locations')\n parser.add_argument('--graph_emb_type', help='Knowledge Graph Embedding type for actions: (numberbatch, complex)')\n parser.add_argument('--egreedy_epsilon', type=float, default=0.0, help=\"Epsilon for the e-greedy exploration\")\n\n opt = parser.parse_args()\n print(opt)\n random.seed(opt.initial_seed)\n np.random.seed(opt.initial_seed)\n torch.manual_seed(opt.initial_seed) # For reproducibility\n if torch.cuda.is_available():\n torch.cuda.manual_seed(opt.initial_seed)\n torch.backends.cudnn.deterministic = True\n # yappi.start()\n\n scores_runs = []\n norm_score_runs = []\n moves_runs = []\n test_scores_runs = []\n test_norm_score_runs = []\n test_moves_runs = []\n\n random_action = False\n if opt.agent_type == 'random':\n random_action = True\n opt.graph_emb_type = None\n if opt.agent_type == 'simple':\n opt.graph_type = ''\n opt.graph_emb_type = None\n\n # Reset prune start episodes if pruning is not selected\n if not opt.prune_nodes:\n opt.prune_start_episode = opt.no_train_episodes\n\n tk_extractor = extractor.get_extractor(opt.token_extractor)\n\n results_filename = opt.results_dir + '/' + opt.agent_type + '_' + opt.game_dir.split('/')[-1] + '_' + (\n opt.graph_mode + '_' + opt.graph_type + '_' if opt.graph_type else '') + (\n str(opt.word_emb_type) + '_' if opt.word_emb_type else '') + (\n str(opt.graph_emb_type) + '-' if opt.graph_emb_type else '') + str(\n opt.nruns) + 'runs_' + str(opt.no_train_episodes) + 'episodes_' + str(opt.hist_scmds_size) + 'hsize_' + str(\n opt.egreedy_epsilon) + 'eps_' + opt.difficulty_level+'_' + opt.local_evolve_type+'_' + opt.world_evolve_type + '_' + str(opt.runid) + 'runId'\n opt.results_filename = results_filename\n graph = None\n seeds = [random.randint(1, 100) for _ in range(opt.nruns)]\n for n in range(0, opt.nruns):\n opt.run_no = n\n opt.seed = seeds[n]\n random.seed(opt.seed)\n np.random.seed(opt.seed)\n torch.manual_seed(opt.seed) # For reproducibility\n if torch.cuda.is_available():\n torch.cuda.manual_seed(opt.seed)\n\n tokenizer = Tokenizer(noun_only_tokens=opt.noun_only_tokens, use_stopword=opt.use_stopword, ngram=opt.ngram,\n extractor=tk_extractor)\n rel_extractor = RelationExtractor(tokenizer, openie_url=opt.corenlp_url)\n myagent = agent.KnowledgeAwareAgent(graph, opt, tokenizer,rel_extractor, device)\n myagent.type = opt.agent_type\n\n print(\"Training ...\")\n myagent.train(opt.batch_size) # Tell the agent it should update its parameters.\n opt.mode = \"train\"\n opt.nepisodes = opt.no_train_episodes # for training\n opt.max_step_per_episode=opt.train_max_step_per_episode\n starttime = time()\n print(\"\\n RUN \", n, \"\\n\")\n scores, norm_scores, moves = play(myagent, opt, random_action=random_action)\n print(\"Trained in {:.2f} secs\".format(time() - starttime))\n\n # Save train model\n torch.save(myagent.model.state_dict(), getUniqueFileHandler(results_filename, ext='.pt'))\n\n" ]
[ [ "numpy.min", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.Normalize", "numpy.max", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.show" ], [ "numpy.random.seed", "torch.cuda.manual_seed", "torch.manual_seed", "numpy.apply_along_axis", "numpy.mean", "torch.cuda.is_available", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
714627034/Paddle-Lite
[ "015ba88a4d639db0b73603e37f83e47be041a4eb", "015ba88a4d639db0b73603e37f83e47be041a4eb", "eea59b66f61bb2acad471010c9526eeec43a15ca", "015ba88a4d639db0b73603e37f83e47be041a4eb" ]
[ "lite/tests/unittest_py/op/common/test_unique_with_counts_op_base.py", "lite/tests/unittest_py/op/test_squeeze_op.py", "lite/tests/unittest_py/op/common/test_expand_as_op_base.py", "lite/tests/unittest_py/op/test_compare_less_op.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('..')\n\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport numpy as np\nfrom functools import partial\nfrom typing import Optional, List, Callable, Dict, Any, Set\nimport unittest\nimport hypothesis\nimport hypothesis.strategies as st\n\n\ndef sample_program_configs(draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=2, max_value=100), min_size=1, max_size=1))\n\n def generate_IndexTensor():\n return np.random.randint(1, 5, size=in_shape).astype(np.int32)\n\n unique_with_counts_op = OpConfig(\n type=\"unique_with_counts\",\n inputs={\"X\": [\"input_data\"]},\n outputs={\n \"Out\": [\"output_data\"],\n \"Index\": [\"Index_data\"],\n \"Count\": [\"Count_data\"]\n },\n attrs={\"dtype\": 2})\n program_config = ProgramConfig(\n ops=[unique_with_counts_op],\n weights={\n \"Index_data\": TensorConfig(data_gen=partial(generate_IndexTensor))\n },\n inputs={\"input_data\": TensorConfig(shape=in_shape), },\n outputs=[\"output_data\", \"Index_data\", \"Count_data\"])\n return program_config\n", "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('../')\n\nfrom auto_scan_test import AutoScanTest, IgnoreReasons\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport unittest\nfrom functools import partial\nimport hypothesis\nfrom hypothesis import given, settings, seed, example, assume\nimport hypothesis.strategies as st\nimport numpy as np\n\n\nclass TestSqueezeOp(AutoScanTest):\n def __init__(self, *args, **kwargs):\n AutoScanTest.__init__(self, *args, **kwargs)\n self.enable_testing_on_place(\n TargetType.Host,\n PrecisionType.Any,\n DataLayoutType.NCHW,\n thread=[1, 4])\n opencl_places = [\n Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),\n Place(TargetType.OpenCL, PrecisionType.FP16,\n DataLayoutType.ImageDefault), Place(\n TargetType.OpenCL, PrecisionType.FP16,\n DataLayoutType.ImageFolder), Place(\n TargetType.OpenCL, PrecisionType.Any,\n DataLayoutType.ImageDefault), Place(\n TargetType.OpenCL, PrecisionType.Any,\n DataLayoutType.ImageFolder),\n Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),\n Place(TargetType.Host, PrecisionType.FP32)\n ]\n self.enable_testing_on_place(places=opencl_places)\n self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)\n self.enable_devices_on_nnadapter(device_names=[\n \"kunlunxin_xtcl\", \"cambricon_mlu\", \"nvidia_tensorrt\"\n ])\n\n def is_program_valid(self,\n program_config: ProgramConfig,\n predictor_config: CxxConfig) -> bool:\n #check config\n x_dtype = program_config.inputs[\"input_data\"].dtype\n if predictor_config.target() == TargetType.OpenCL:\n if x_dtype == np.int32 or x_dtype == np.int64:\n return False\n return True\n\n def sample_program_configs(self, draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=1, max_value=32), min_size=1, max_size=4))\n input_type = draw(st.sampled_from([\"float32\", \"int32\", \"int64\"]))\n input_axis = draw(st.sampled_from([[0, 1], [2, 3]]))\n assume(len(input_axis) <= len(in_shape))\n if len(input_axis) > 0:\n for num in input_axis:\n num = num if num >= 0 else num + len(in_shape)\n assume(num < len(in_shape))\n # \"nvidia_tensorrt\" must satisfies theses \n if self.get_nnadapter_device_name() == \"nvidia_tensorrt\":\n for i in range(len(input_axis)):\n in_shape[input_axis[i]] = 1\n input_type = \"float32\"\n\n def generate_input(*args, **kwargs):\n if input_type == \"float32\":\n return np.random.normal(1.0, 6.0, in_shape).astype(np.float32)\n elif input_type == \"int32\":\n return np.random.normal(1.0, 6.0, in_shape).astype(np.int32)\n elif input_type == \"int64\":\n return np.random.normal(1.0, 6.0, in_shape).astype(np.int64)\n\n ops_config = OpConfig(\n type=\"squeeze\",\n inputs={\"X\": [\"input_data\"]},\n outputs={\"Out\": [\"output_data\"]},\n attrs={\"axes\": input_axis})\n\n ops_config.outputs_dtype = {\"output_data\": input_type}\n\n program_config = ProgramConfig(\n ops=[ops_config],\n 
weights={},\n inputs={\n \"input_data\": TensorConfig(data_gen=partial(generate_input))\n },\n outputs=[\"output_data\"])\n\n return program_config\n\n def sample_predictor_configs(self):\n return self.get_predictor_configs(), [\"squeeze\"], (1e-5, 1e-5)\n\n def add_ignore_pass_case(self):\n def teller1(program_config, predictor_config):\n if self.get_nnadapter_device_name() == \"nvidia_tensorrt\":\n in_shape = program_config.inputs[\"input_data\"].shape\n axes = program_config.ops[0].attrs[\"axes\"]\n if len(in_shape) == 1 \\\n or 0 in axes \\\n or -len(in_shape) in axes:\n return True\n\n self.add_ignore_check_case(\n teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,\n \"Lite does not support 'in_shape_size == 1' or 'axes has 0' on nvidia_tensorrt.\"\n )\n\n def test(self, *args, **kwargs):\n self.run_and_statis(quant=False, max_examples=100)\n\n\nif __name__ == \"__main__\":\n unittest.main(argv=[''])\n", "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('..')\n\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport numpy as np\nfrom functools import partial\nfrom typing import Optional, List, Callable, Dict, Any, Set\nimport unittest\nimport hypothesis\nimport hypothesis.strategies as st\n\n\ndef sample_program_configs(draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=1, max_value=8), min_size=3, max_size=4))\n target1 = []\n target2 = []\n target3 = []\n for i in range(len(in_shape)):\n target1.append(in_shape[i] * (i + 1))\n target2.append(in_shape[i] * (i + 1) * 2)\n target3.append(in_shape[i] * (i + 1) * 3)\n target_shape = draw(st.sampled_from([target1, target2, target3]))\n\n def generate_input_int64(*args, **kwargs):\n return np.random.random(in_shape).astype(np.int64)\n\n def generate_input_float32(*args, **kwargs):\n return np.random.random(in_shape).astype(np.float32)\n\n input_type = draw(\n st.sampled_from([generate_input_int64, generate_input_float32]))\n\n def generate_target(*args, **kwargs):\n if input_type == generate_input_int64:\n return np.random.random(target_shape).astype(np.int64)\n else:\n return np.random.random(target_shape).astype(np.float32)\n\n expand_as_op = OpConfig(\n type=\"expand_as\",\n inputs={\"X\": [\"input_data\"],\n \"target_tensor\": [\"target_data\"]},\n outputs={\"Out\": [\"output_data\"]},\n attrs={})\n\n program_config = ProgramConfig(\n ops=[expand_as_op],\n weights={},\n inputs={\n \"input_data\": TensorConfig(data_gen=partial(input_type)),\n \"target_data\": TensorConfig(data_gen=partial(generate_target))\n },\n outputs=[\"output_data\"])\n return program_config\n", "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nfrom numpy.lib.function_base import place\nsys.path.append('../')\n\nfrom auto_scan_test import AutoScanTest, IgnoreReasons\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport unittest\n\nimport hypothesis\nfrom hypothesis import given, settings, seed, example, assume\nimport hypothesis.strategies as st\nimport argparse\nimport numpy as np\nfrom functools import partial\n\n\nclass TestCompareLessOp(AutoScanTest):\n def __init__(self, *args, **kwargs):\n AutoScanTest.__init__(self, *args, **kwargs)\n host_op_config = [\n Place(TargetType.Host, PrecisionType.Any, DataLayoutType.NCHW),\n Place(TargetType.Host, PrecisionType.FP32, DataLayoutType.Any)\n ]\n self.enable_testing_on_place(places=host_op_config)\n self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)\n self.enable_devices_on_nnadapter(device_names=[\"cambricon_mlu\"])\n\n def is_program_valid(self,\n program_config: ProgramConfig,\n predictor_config: CxxConfig) -> bool:\n return True\n\n def sample_program_configs(self, draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=3, max_value=10), min_size=3, max_size=4))\n axis = draw(st.sampled_from([-1, 0, 1, 2]))\n op_type_str = draw(st.sampled_from([\"less_equal\", \"less_than\"]))\n process_type = draw(\n st.sampled_from([\"type_int64\", \"type_float\", \"type_int32\"]))\n\n if axis == -1:\n in_shape_y = in_shape\n else:\n in_shape_y = in_shape[axis:]\n\n def generate_data(type, size_list):\n if type == \"type_int32\":\n return np.random.randint(\n low=0, high=100, size=size_list).astype(np.int32)\n elif type == \"type_int64\":\n return np.random.randint(\n low=0, high=100, size=size_list).astype(np.int64)\n elif type == \"type_float\":\n return np.random.random(size=size_list).astype(np.float32)\n\n def generate_input_x():\n return generate_data(process_type, in_shape)\n\n def generate_input_y():\n return generate_data(process_type, in_shape_y)\n\n build_ops = OpConfig(\n type=op_type_str,\n inputs={\"X\": [\"data_x\"],\n \"Y\": [\"data_y\"]},\n outputs={\"Out\": [\"output_data\"], },\n attrs={\"axis\": axis,\n \"force_cpu\": True})\n build_ops.outputs_dtype = {\"output_data\": np.bool_}\n\n cast_out = OpConfig(\n type=\"cast\",\n inputs={\"X\": [\"output_data\"], },\n outputs={\"Out\": [\"cast_data_out\"], },\n attrs={\"in_dtype\": int(0),\n \"out_dtype\": int(2)})\n cast_out.outputs_dtype = {\"cast_data_out\": np.int32}\n\n program_config = ProgramConfig(\n ops=[build_ops, cast_out],\n weights={},\n inputs={\n \"data_x\": TensorConfig(data_gen=partial(generate_input_x)),\n \"data_y\": TensorConfig(data_gen=partial(generate_input_y)),\n },\n outputs=[\"cast_data_out\"])\n return program_config\n\n def sample_predictor_configs(self):\n return self.get_predictor_configs(), [\"less_equal_and_than\"], (1e-5,\n 1e-5)\n\n def add_ignore_pass_case(self):\n pass\n\n def test(self, *args, 
**kwargs):\n self.run_and_statis(quant=False, max_examples=60)\n\n\nif __name__ == \"__main__\":\n unittest.main(argv=[''])\n" ]
[ [ "numpy.random.randint" ], [ "numpy.random.normal" ], [ "numpy.random.random" ], [ "numpy.random.random", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lqkweb/learnMLflow
[ "13c5decaebba95b1b90f92021be35e343b4764af", "13c5decaebba95b1b90f92021be35e343b4764af", "13c5decaebba95b1b90f92021be35e343b4764af", "13c5decaebba95b1b90f92021be35e343b4764af", "13c5decaebba95b1b90f92021be35e343b4764af" ]
[ "scikit-learn-master/sklearn/linear_model/ridge.py", "scikit-learn-master/sklearn/utils/tests/test_utils.py", "scikit-learn-master/sklearn/linear_model/least_angle.py", "scikit-learn-master/sklearn/datasets/tests/test_base.py", "scikit-learn-master/sklearn/ensemble/base.py" ]
[ "\"\"\"\nRidge regression\n\"\"\"\n\n# Author: Mathieu Blondel <[email protected]>\n# Reuben Fletcher-Costin <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Michael Eickenberg <[email protected]>\n# License: BSD 3 clause\n\n\nfrom abc import ABCMeta, abstractmethod\nimport warnings\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy import sparse\nfrom scipy.sparse import linalg as sp_linalg\n\nfrom .base import LinearClassifierMixin, LinearModel, _rescale_data\nfrom .sag import sag_solver\nfrom ..base import RegressorMixin\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.extmath import row_norms\nfrom ..utils import check_X_y\nfrom ..utils import check_array\nfrom ..utils import check_consistent_length\nfrom ..utils import compute_sample_weight\nfrom ..utils import column_or_1d\nfrom ..preprocessing import LabelBinarizer\nfrom ..model_selection import GridSearchCV\nfrom ..metrics.scorer import check_scoring\nfrom ..exceptions import ConvergenceWarning\n\n\ndef _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):\n n_samples, n_features = X.shape\n X1 = sp_linalg.aslinearoperator(X)\n coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)\n\n if n_features > n_samples:\n def create_mv(curr_alpha):\n def _mv(x):\n return X1.matvec(X1.rmatvec(x)) + curr_alpha * x\n return _mv\n else:\n def create_mv(curr_alpha):\n def _mv(x):\n return X1.rmatvec(X1.matvec(x)) + curr_alpha * x\n return _mv\n\n for i in range(y.shape[1]):\n y_column = y[:, i]\n\n mv = create_mv(alpha[i])\n if n_features > n_samples:\n # kernel ridge\n # w = X.T * inv(X X^t + alpha*Id) y\n C = sp_linalg.LinearOperator(\n (n_samples, n_samples), matvec=mv, dtype=X.dtype)\n # FIXME atol\n try:\n coef, info = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')\n except TypeError:\n # old scipy\n coef, info = sp_linalg.cg(C, y_column, tol=tol)\n coefs[i] = X1.rmatvec(coef)\n else:\n # linear ridge\n # w = inv(X^t X + alpha*Id) * X.T y\n y_column = X1.rmatvec(y_column)\n C = sp_linalg.LinearOperator(\n (n_features, n_features), matvec=mv, dtype=X.dtype)\n # FIXME atol\n try:\n coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,\n tol=tol, atol='legacy')\n except TypeError:\n # old scipy\n coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,\n tol=tol)\n\n if info < 0:\n raise ValueError(\"Failed with error code %d\" % info)\n\n if max_iter is None and info > 0 and verbose:\n warnings.warn(\"sparse_cg did not converge after %d iterations.\" %\n info, ConvergenceWarning)\n\n return coefs\n\n\ndef _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):\n n_samples, n_features = X.shape\n coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)\n n_iter = np.empty(y.shape[1], dtype=np.int32)\n\n # According to the lsqr documentation, alpha = damp^2.\n sqrt_alpha = np.sqrt(alpha)\n\n for i in range(y.shape[1]):\n y_column = y[:, i]\n info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],\n atol=tol, btol=tol, iter_lim=max_iter)\n coefs[i] = info[0]\n n_iter[i] = info[2]\n\n return coefs, n_iter\n\n\ndef _solve_cholesky(X, y, alpha):\n # w = inv(X^t X + alpha*Id) * X.T y\n n_samples, n_features = X.shape\n n_targets = y.shape[1]\n\n A = safe_sparse_dot(X.T, X, dense_output=True)\n Xy = safe_sparse_dot(X.T, y, dense_output=True)\n\n one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])\n\n if one_alpha:\n A.flat[::n_features + 1] += alpha[0]\n return linalg.solve(A, Xy, sym_pos=True,\n overwrite_a=True).T\n else:\n coefs = np.empty([n_targets, n_features], dtype=X.dtype)\n 
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):\n A.flat[::n_features + 1] += current_alpha\n coef[:] = linalg.solve(A, target, sym_pos=True,\n overwrite_a=False).ravel()\n A.flat[::n_features + 1] -= current_alpha\n return coefs\n\n\ndef _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):\n # dual_coef = inv(X X^t + alpha*Id) y\n n_samples = K.shape[0]\n n_targets = y.shape[1]\n\n if copy:\n K = K.copy()\n\n alpha = np.atleast_1d(alpha)\n one_alpha = (alpha == alpha[0]).all()\n has_sw = isinstance(sample_weight, np.ndarray) \\\n or sample_weight not in [1.0, None]\n\n if has_sw:\n # Unlike other solvers, we need to support sample_weight directly\n # because K might be a pre-computed kernel.\n sw = np.sqrt(np.atleast_1d(sample_weight))\n y = y * sw[:, np.newaxis]\n K *= np.outer(sw, sw)\n\n if one_alpha:\n # Only one penalty, we can solve multi-target problems in one time.\n K.flat[::n_samples + 1] += alpha[0]\n\n try:\n # Note: we must use overwrite_a=False in order to be able to\n # use the fall-back solution below in case a LinAlgError\n # is raised\n dual_coef = linalg.solve(K, y, sym_pos=True,\n overwrite_a=False)\n except np.linalg.LinAlgError:\n warnings.warn(\"Singular matrix in solving dual problem. Using \"\n \"least-squares solution instead.\")\n dual_coef = linalg.lstsq(K, y)[0]\n\n # K is expensive to compute and store in memory so change it back in\n # case it was user-given.\n K.flat[::n_samples + 1] -= alpha[0]\n\n if has_sw:\n dual_coef *= sw[:, np.newaxis]\n\n return dual_coef\n else:\n # One penalty per target. We need to solve each target separately.\n dual_coefs = np.empty([n_targets, n_samples], K.dtype)\n\n for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):\n K.flat[::n_samples + 1] += current_alpha\n\n dual_coef[:] = linalg.solve(K, target, sym_pos=True,\n overwrite_a=False).ravel()\n\n K.flat[::n_samples + 1] -= current_alpha\n\n if has_sw:\n dual_coefs *= sw[np.newaxis, :]\n\n return dual_coefs.T\n\n\ndef _solve_svd(X, y, alpha):\n U, s, Vt = linalg.svd(X, full_matrices=False)\n idx = s > 1e-15 # same default value as scipy.linalg.pinv\n s_nnz = s[idx][:, np.newaxis]\n UTy = np.dot(U.T, y)\n d = np.zeros((s.size, alpha.size), dtype=X.dtype)\n d[idx] = s_nnz / (s_nnz ** 2 + alpha)\n d_UT_y = d * UTy\n return np.dot(Vt.T, d_UT_y).T\n\n\ndef ridge_regression(X, y, alpha, sample_weight=None, solver='auto',\n max_iter=None, tol=1e-3, verbose=0, random_state=None,\n return_n_iter=False, return_intercept=False):\n \"\"\"Solve the ridge equation by the method of normal equations.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, LinearOperator},\n shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values\n\n alpha : {float, array-like},\n shape = [n_targets] if array-like\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC. If an array is passed, penalties are\n assumed to be specific to the targets. Hence they must correspond in\n number.\n\n sample_weight : float or numpy array of shape [n_samples]\n Individual weights for each sample. If sample_weight is not None and\n solver='auto', the solver will be set to 'cholesky'.\n\n .. 
versionadded:: 0.17\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than\n 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution via a Cholesky decomposition of\n dot(X.T, X)\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its improved, unbiased version named SAGA. Both methods also use an\n iterative procedure, and are often faster than other solvers when\n both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n\n All last five solvers support both dense and sparse data. However, only\n 'sag' and 'saga' supports sparse input when`fit_intercept` is True.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n max_iter : int, optional\n Maximum number of iterations for conjugate gradient solver.\n For the 'sparse_cg' and 'lsqr' solvers, the default value is determined\n by scipy.sparse.linalg. For 'sag' and saga solver, the default value is\n 1000.\n\n tol : float\n Precision of the solution.\n\n verbose : int\n Verbosity level. Setting verbose > 0 will display additional\n information depending on the solver used.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator to use when shuffling\n the data. If int, random_state is the seed used by the random number\n generator; If RandomState instance, random_state is the random number\n generator; If None, the random number generator is the RandomState\n instance used by `np.random`. Used when ``solver`` == 'sag'.\n\n return_n_iter : boolean, default False\n If True, the method also returns `n_iter`, the actual number of\n iteration performed by the solver.\n\n .. versionadded:: 0.17\n\n return_intercept : boolean, default False\n If True and if X is sparse, the method also returns the intercept,\n and the solver is automatically changed to 'sag'. This is only a\n temporary fix for fitting the intercept with sparse data. For dense\n data, use sklearn.linear_model._preprocess_data before your regression.\n\n .. versionadded:: 0.17\n\n Returns\n -------\n coef : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n\n n_iter : int, optional\n The actual number of iteration performed by the solver.\n Only returned if `return_n_iter` is True.\n\n intercept : float or array, shape = [n_targets]\n The intercept of the model. 
Only returned if `return_intercept`\n is True and if X is a scipy sparse array.\n\n Notes\n -----\n This function won't compute the intercept.\n \"\"\"\n if return_intercept and sparse.issparse(X) and solver != 'sag':\n if solver != 'auto':\n warnings.warn(\"In Ridge, only 'sag' solver can currently fit the \"\n \"intercept when X is sparse. Solver has been \"\n \"automatically changed into 'sag'.\")\n solver = 'sag'\n\n _dtype = [np.float64, np.float32]\n\n # SAG needs X and y columns to be C-contiguous and np.float64\n if solver in ['sag', 'saga']:\n X = check_array(X, accept_sparse=['csr'],\n dtype=np.float64, order='C')\n y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')\n else:\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],\n dtype=_dtype)\n y = check_array(y, dtype=X.dtype, ensure_2d=False)\n check_consistent_length(X, y)\n\n n_samples, n_features = X.shape\n\n if y.ndim > 2:\n raise ValueError(\"Target y has the wrong shape %s\" % str(y.shape))\n\n ravel = False\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n ravel = True\n\n n_samples_, n_targets = y.shape\n\n if n_samples != n_samples_:\n raise ValueError(\"Number of samples in X and y does not correspond:\"\n \" %d != %d\" % (n_samples, n_samples_))\n\n has_sw = sample_weight is not None\n\n if solver == 'auto':\n # cholesky if it's a dense array and cg in any other case\n if not sparse.issparse(X) or has_sw:\n solver = 'cholesky'\n else:\n solver = 'sparse_cg'\n\n if has_sw:\n if np.atleast_1d(sample_weight).ndim > 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n if solver not in ['sag', 'saga']:\n # SAG supports sample_weight directly. For other solvers,\n # we implement sample_weight via a simple rescaling.\n X, y = _rescale_data(X, y, sample_weight)\n\n # There should be either 1 or n_targets penalties\n alpha = np.asarray(alpha, dtype=X.dtype).ravel()\n if alpha.size not in [1, n_targets]:\n raise ValueError(\"Number of targets and number of penalties \"\n \"do not correspond: %d != %d\"\n % (alpha.size, n_targets))\n\n if alpha.size == 1 and n_targets > 1:\n alpha = np.repeat(alpha, n_targets)\n\n if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):\n raise ValueError('Solver %s not understood' % solver)\n\n n_iter = None\n if solver == 'sparse_cg':\n coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)\n\n elif solver == 'lsqr':\n coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)\n\n elif solver == 'cholesky':\n if n_features > n_samples:\n K = safe_sparse_dot(X, X.T, dense_output=True)\n try:\n dual_coef = _solve_cholesky_kernel(K, y, alpha)\n\n coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T\n except linalg.LinAlgError:\n # use SVD solver if matrix is singular\n solver = 'svd'\n\n else:\n try:\n coef = _solve_cholesky(X, y, alpha)\n except linalg.LinAlgError:\n # use SVD solver if matrix is singular\n solver = 'svd'\n\n elif solver in ['sag', 'saga']:\n # precompute max_squared_sum for all targets\n max_squared_sum = row_norms(X, squared=True).max()\n\n coef = np.empty((y.shape[1], n_features))\n n_iter = np.empty(y.shape[1], dtype=np.int32)\n intercept = np.zeros((y.shape[1], ))\n for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):\n init = {'coef': np.zeros((n_features + int(return_intercept), 1))}\n coef_, n_iter_, _ = sag_solver(\n X, target.ravel(), sample_weight, 'squared', alpha_i, 0,\n max_iter, tol, verbose, random_state, False, max_squared_sum,\n init,\n is_saga=solver == 'saga')\n if return_intercept:\n coef[i] = 
coef_[:-1]\n intercept[i] = coef_[-1]\n else:\n coef[i] = coef_\n n_iter[i] = n_iter_\n\n if intercept.shape[0] == 1:\n intercept = intercept[0]\n coef = np.asarray(coef)\n\n if solver == 'svd':\n if sparse.issparse(X):\n raise TypeError('SVD solver does not support sparse'\n ' inputs currently')\n coef = _solve_svd(X, y, alpha)\n\n if ravel:\n # When y was passed as a 1d-array, we flatten the coefficients.\n coef = coef.ravel()\n\n if return_n_iter and return_intercept:\n return coef, n_iter, intercept\n elif return_intercept:\n return coef, intercept\n elif return_n_iter:\n return coef, n_iter\n else:\n return coef\n\n\nclass _BaseRidge(LinearModel, metaclass=ABCMeta):\n\n @abstractmethod\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, solver=\"auto\",\n random_state=None):\n self.alpha = alpha\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.max_iter = max_iter\n self.tol = tol\n self.solver = solver\n self.random_state = random_state\n\n def fit(self, X, y, sample_weight=None):\n\n if self.solver in ('sag', 'saga'):\n _dtype = np.float64\n else:\n # all other solvers work at both float precision levels\n _dtype = [np.float64, np.float32]\n\n X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=_dtype,\n multi_output=True, y_numeric=True)\n\n if ((sample_weight is not None) and\n np.atleast_1d(sample_weight).ndim > 1):\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X,\n sample_weight=sample_weight)\n\n # temporary fix for fitting the intercept with sparse data using 'sag'\n if sparse.issparse(X) and self.fit_intercept:\n self.coef_, self.n_iter_, self.intercept_ = ridge_regression(\n X, y, alpha=self.alpha, sample_weight=sample_weight,\n max_iter=self.max_iter, tol=self.tol, solver=self.solver,\n random_state=self.random_state, return_n_iter=True,\n return_intercept=True)\n self.intercept_ += y_offset\n else:\n self.coef_, self.n_iter_ = ridge_regression(\n X, y, alpha=self.alpha, sample_weight=sample_weight,\n max_iter=self.max_iter, tol=self.tol, solver=self.solver,\n random_state=self.random_state, return_n_iter=True,\n return_intercept=False)\n self._set_intercept(X_offset, y_offset, X_scale)\n\n return self\n\n\nclass Ridge(_BaseRidge, RegressorMixin):\n \"\"\"Linear least squares with l2 regularization.\n\n Minimizes the objective function::\n\n ||y - Xw||^2_2 + alpha * ||w||^2_2\n\n This model solves a regression model where the loss function is\n the linear least squares function and regularization is given by\n the l2-norm. Also known as Ridge Regression or Tikhonov regularization.\n This estimator has built-in support for multi-variate regression\n (i.e., when y is a 2d-array of shape [n_samples, n_targets]).\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alpha : {float, array-like}, shape (n_targets)\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC. If an array is passed, penalties are\n assumed to be specific to the targets. Hence they must correspond in\n number.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. 
If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n max_iter : int, optional\n Maximum number of iterations for conjugate gradient solver.\n For 'sparse_cg' and 'lsqr' solvers, the default value is determined\n by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.\n\n tol : float\n Precision of the solution.\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than\n 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its improved, unbiased version named SAGA. Both methods also use an\n iterative procedure, and are often faster than other solvers when\n both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n All last five solvers support both dense and sparse data. However,\n only 'sag' and 'saga' supports sparse input when `fit_intercept` is\n True.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator to use when shuffling\n the data. If int, random_state is the seed used by the random number\n generator; If RandomState instance, random_state is the random number\n generator; If None, the random number generator is the RandomState\n instance used by `np.random`. Used when ``solver`` == 'sag'.\n\n .. versionadded:: 0.17\n *random_state* to support Stochastic Average Gradient.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) or (n_targets, n_features)\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n n_iter_ : array or None, shape (n_targets,)\n Actual number of iterations for each target. Available only for\n sag and lsqr solvers. Other solvers will return None.\n\n .. 
versionadded:: 0.17\n\n See also\n --------\n RidgeClassifier : Ridge classifier\n RidgeCV : Ridge regression with built-in cross validation\n :class:`sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression\n combines ridge regression with the kernel trick\n\n Examples\n --------\n >>> from sklearn.linear_model import Ridge\n >>> import numpy as np\n >>> n_samples, n_features = 10, 5\n >>> np.random.seed(0)\n >>> y = np.random.randn(n_samples)\n >>> X = np.random.randn(n_samples, n_features)\n >>> clf = Ridge(alpha=1.0)\n >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE\n Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\n\n \"\"\"\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, solver=\"auto\",\n random_state=None):\n super().__init__(\n alpha=alpha, fit_intercept=fit_intercept,\n normalize=normalize, copy_X=copy_X,\n max_iter=max_iter, tol=tol, solver=solver,\n random_state=random_state)\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values\n\n sample_weight : float or numpy array of shape [n_samples]\n Individual weights for each sample\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n return super().fit(X, y, sample_weight=sample_weight)\n\n\nclass RidgeClassifier(LinearClassifierMixin, _BaseRidge):\n \"\"\"Classifier using Ridge regression.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alpha : float\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set to false, no\n intercept will be used in calculations (e.g. 
data is expected to be\n already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n max_iter : int, optional\n Maximum number of iterations for conjugate gradient solver.\n The default value is determined by scipy.sparse.linalg.\n\n tol : float\n Precision of the solution.\n\n class_weight : dict or 'balanced', optional\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than\n 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its unbiased and more flexible version named SAGA. Both methods\n use an iterative procedure, and are often faster than other solvers\n when both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator to use when shuffling\n the data. If int, random_state is the seed used by the random number\n generator; If RandomState instance, random_state is the random number\n generator; If None, the random number generator is the RandomState\n instance used by `np.random`. Used when ``solver`` == 'sag'.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) or (n_classes, n_features)\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n n_iter_ : array or None, shape (n_targets,)\n Actual number of iterations for each target. Available only for\n sag and lsqr solvers. 
Other solvers will return None.\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.linear_model import RidgeClassifier\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> clf = RidgeClassifier().fit(X, y)\n >>> clf.score(X, y) # doctest: +ELLIPSIS\n 0.9595...\n\n See also\n --------\n Ridge : Ridge regression\n RidgeClassifierCV : Ridge classifier with built-in cross validation\n\n Notes\n -----\n For multi-class classification, n_class classifiers are trained in\n a one-versus-all approach. Concretely, this is implemented by taking\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, class_weight=None,\n solver=\"auto\", random_state=None):\n super().__init__(\n alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,\n copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,\n random_state=random_state)\n self.class_weight = class_weight\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples,n_features]\n Training data\n\n y : array-like, shape = [n_samples]\n Target values\n\n sample_weight : float or numpy array of shape (n_samples,)\n Sample weight.\n\n .. versionadded:: 0.17\n *sample_weight* support to Classifier.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],\n multi_output=True)\n\n self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)\n Y = self._label_binarizer.fit_transform(y)\n if not self._label_binarizer.y_type_.startswith('multilabel'):\n y = column_or_1d(y, warn=True)\n else:\n # we don't (yet) support multi-label classification in Ridge\n raise ValueError(\n \"%s doesn't support multi-label classification\" % (\n self.__class__.__name__))\n\n if self.class_weight:\n if sample_weight is None:\n sample_weight = 1.\n # modify the sample weights with the corresponding class weight\n sample_weight = (sample_weight *\n compute_sample_weight(self.class_weight, y))\n\n super().fit(X, Y, sample_weight=sample_weight)\n return self\n\n @property\n def classes_(self):\n return self._label_binarizer.classes_\n\n\nclass _RidgeGCV(LinearModel):\n \"\"\"Ridge regression with built-in Generalized Cross-Validation\n\n It allows efficient Leave-One-Out cross-validation.\n\n This class is not intended to be used directly. 
Use RidgeCV instead.\n\n Notes\n -----\n\n We want to solve (K + alpha*Id)c = y,\n where K = X X^T is the kernel matrix.\n\n Let G = (K + alpha*Id)^-1.\n\n Dual solution: c = Gy\n Primal solution: w = X^T c\n\n Compute eigendecomposition K = Q V Q^T.\n Then G = Q (V + alpha*Id)^-1 Q^T,\n where (V + alpha*Id) is diagonal.\n It is thus inexpensive to inverse for many alphas.\n\n Let loov be the vector of prediction values for each example\n when the model was fitted with all examples but this example.\n\n loov = (KGY - diag(KG)Y) / diag(I-KG)\n\n Let looe be the vector of prediction errors for each example\n when the model was fitted with all examples but this example.\n\n looe = y - loov = c / diag(G)\n\n References\n ----------\n http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf\n https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf\n \"\"\"\n\n def __init__(self, alphas=(0.1, 1.0, 10.0),\n fit_intercept=True, normalize=False,\n scoring=None, copy_X=True,\n gcv_mode=None, store_cv_values=False):\n self.alphas = np.asarray(alphas)\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.scoring = scoring\n self.copy_X = copy_X\n self.gcv_mode = gcv_mode\n self.store_cv_values = store_cv_values\n\n def _pre_compute(self, X, y, centered_kernel=True):\n # even if X is very sparse, K is usually very dense\n K = safe_sparse_dot(X, X.T, dense_output=True)\n # the following emulates an additional constant regressor\n # corresponding to fit_intercept=True\n # but this is done only when the features have been centered\n if centered_kernel:\n K += np.ones_like(K)\n v, Q = linalg.eigh(K)\n QT_y = np.dot(Q.T, y)\n return v, Q, QT_y\n\n def _decomp_diag(self, v_prime, Q):\n # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))\n return (v_prime * Q ** 2).sum(axis=-1)\n\n def _diag_dot(self, D, B):\n # compute dot(diag(D), B)\n if len(B.shape) > 1:\n # handle case where B is > 1-d\n D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]\n return D * B\n\n def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):\n \"\"\"Helper function to avoid code duplication between self._errors and\n self._values.\n\n Notes\n -----\n We don't construct matrix G, instead compute action on y & diagonal.\n \"\"\"\n w = 1. 
/ (v + alpha)\n constant_column = np.var(Q, 0) < 1.e-12\n # detect constant columns\n w[constant_column] = 0 # cancel the regularization for the intercept\n\n c = np.dot(Q, self._diag_dot(w, QT_y))\n G_diag = self._decomp_diag(w, Q)\n # handle case where y is 2-d\n if len(y.shape) != 1:\n G_diag = G_diag[:, np.newaxis]\n return G_diag, c\n\n def _errors(self, alpha, y, v, Q, QT_y):\n G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)\n return (c / G_diag) ** 2, c\n\n def _values(self, alpha, y, v, Q, QT_y):\n G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)\n return y - (c / G_diag), c\n\n def _pre_compute_svd(self, X, y, centered_kernel=True):\n if sparse.issparse(X):\n raise TypeError(\"SVD not supported for sparse matrices\")\n if centered_kernel:\n X = np.hstack((X, np.ones((X.shape[0], 1))))\n # to emulate fit_intercept=True situation, add a column on ones\n # Note that by centering, the other columns are orthogonal to that one\n U, s, _ = linalg.svd(X, full_matrices=0)\n v = s ** 2\n UT_y = np.dot(U.T, y)\n return v, U, UT_y\n\n def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):\n \"\"\"Helper function to avoid code duplication between self._errors_svd\n and self._values_svd.\n \"\"\"\n constant_column = np.var(U, 0) < 1.e-12\n # detect columns colinear to ones\n w = ((v + alpha) ** -1) - (alpha ** -1)\n w[constant_column] = - (alpha ** -1)\n # cancel the regularization for the intercept\n c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y\n G_diag = self._decomp_diag(w, U) + (alpha ** -1)\n if len(y.shape) != 1:\n # handle case where y is 2-d\n G_diag = G_diag[:, np.newaxis]\n return G_diag, c\n\n def _errors_svd(self, alpha, y, v, U, UT_y):\n G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)\n return (c / G_diag) ** 2, c\n\n def _values_svd(self, alpha, y, v, U, UT_y):\n G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)\n return y - (c / G_diag), c\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values. 
Will be cast to X's dtype if necessary\n\n sample_weight : float or array-like of shape [n_samples]\n Sample weight\n\n Returns\n -------\n self : object\n \"\"\"\n X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,\n multi_output=True, y_numeric=True)\n if sample_weight is not None and not isinstance(sample_weight, float):\n sample_weight = check_array(sample_weight, ensure_2d=False)\n n_samples, n_features = X.shape\n\n X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X,\n sample_weight=sample_weight)\n\n gcv_mode = self.gcv_mode\n with_sw = len(np.shape(sample_weight))\n\n if gcv_mode is None or gcv_mode == 'auto':\n if sparse.issparse(X) or n_features > n_samples or with_sw:\n gcv_mode = 'eigen'\n else:\n gcv_mode = 'svd'\n elif gcv_mode == \"svd\" and with_sw:\n # FIXME non-uniform sample weights not yet supported\n warnings.warn(\"non-uniform sample weights unsupported for svd, \"\n \"forcing usage of eigen\")\n gcv_mode = 'eigen'\n\n if gcv_mode == 'eigen':\n _pre_compute = self._pre_compute\n _errors = self._errors\n _values = self._values\n elif gcv_mode == 'svd':\n # assert n_samples >= n_features\n _pre_compute = self._pre_compute_svd\n _errors = self._errors_svd\n _values = self._values_svd\n else:\n raise ValueError('bad gcv_mode \"%s\"' % gcv_mode)\n\n if sample_weight is not None:\n X, y = _rescale_data(X, y, sample_weight)\n\n centered_kernel = not sparse.issparse(X) and self.fit_intercept\n\n v, Q, QT_y = _pre_compute(X, y, centered_kernel)\n n_y = 1 if len(y.shape) == 1 else y.shape[1]\n cv_values = np.zeros((n_samples * n_y, len(self.alphas)))\n C = []\n\n scorer = check_scoring(self, scoring=self.scoring, allow_none=True)\n error = scorer is None\n\n if np.any(self.alphas < 0):\n raise ValueError(\"alphas cannot be negative. \"\n \"Got {} containing some \"\n \"negative value instead.\".format(self.alphas))\n\n for i, alpha in enumerate(self.alphas):\n if error:\n out, c = _errors(float(alpha), y, v, Q, QT_y)\n else:\n out, c = _values(float(alpha), y, v, Q, QT_y)\n cv_values[:, i] = out.ravel()\n C.append(c)\n\n if error:\n best = cv_values.mean(axis=0).argmin()\n else:\n # The scorer want an object that will make the predictions but\n # they are already computed efficiently by _RidgeGCV. 
This\n # identity_estimator will just return them\n def identity_estimator():\n pass\n identity_estimator.decision_function = lambda y_predict: y_predict\n identity_estimator.predict = lambda y_predict: y_predict\n\n out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])\n for i in range(len(self.alphas))]\n best = np.argmax(out)\n\n self.alpha_ = self.alphas[best]\n self.dual_coef_ = C[best]\n self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)\n\n self._set_intercept(X_offset, y_offset, X_scale)\n\n if self.store_cv_values:\n if len(y.shape) == 1:\n cv_values_shape = n_samples, len(self.alphas)\n else:\n cv_values_shape = n_samples, n_y, len(self.alphas)\n self.cv_values_ = cv_values.reshape(cv_values_shape)\n\n return self\n\n\nclass _BaseRidgeCV(LinearModel):\n def __init__(self, alphas=(0.1, 1.0, 10.0),\n fit_intercept=True, normalize=False, scoring=None,\n cv=None, gcv_mode=None,\n store_cv_values=False):\n self.alphas = np.asarray(alphas)\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.scoring = scoring\n self.cv = cv\n self.gcv_mode = gcv_mode\n self.store_cv_values = store_cv_values\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values. Will be cast to X's dtype if necessary\n\n sample_weight : float or array-like of shape [n_samples]\n Sample weight\n\n Returns\n -------\n self : object\n \"\"\"\n if self.cv is None:\n estimator = _RidgeGCV(self.alphas,\n fit_intercept=self.fit_intercept,\n normalize=self.normalize,\n scoring=self.scoring,\n gcv_mode=self.gcv_mode,\n store_cv_values=self.store_cv_values)\n estimator.fit(X, y, sample_weight=sample_weight)\n self.alpha_ = estimator.alpha_\n if self.store_cv_values:\n self.cv_values_ = estimator.cv_values_\n else:\n if self.store_cv_values:\n raise ValueError(\"cv!=None and store_cv_values=True \"\n \" are incompatible\")\n parameters = {'alpha': self.alphas}\n gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept,\n normalize=self.normalize),\n parameters, cv=self.cv, scoring=self.scoring)\n gs.fit(X, y, sample_weight=sample_weight)\n estimator = gs.best_estimator_\n self.alpha_ = gs.best_estimator_.alpha\n\n self.coef_ = estimator.coef_\n self.intercept_ = estimator.intercept_\n\n return self\n\n\nclass RidgeCV(_BaseRidgeCV, RegressorMixin):\n \"\"\"Ridge regression with built-in cross-validation.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n By default, it performs Generalized Cross-Validation, which is a form of\n efficient Leave-One-Out cross-validation.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alphas : numpy array of shape [n_alphas]\n Array of alpha values to try.\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. 
data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the efficient Leave-One-Out cross-validation\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`sklearn.model_selection.KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n gcv_mode : {None, 'auto', 'svd', eigen'}, optional\n Flag indicating which strategy to use when performing\n Generalized Cross-Validation. Options are::\n\n 'auto' : use svd if n_samples > n_features or when X is a sparse\n matrix, otherwise use eigen\n 'svd' : force computation via singular value decomposition of X\n (does not work for sparse matrices)\n 'eigen' : force computation via eigendecomposition of X^T X\n\n The 'auto' mode is the default and is intended to pick the cheaper\n option of the two depending upon the shape and format of the training\n data.\n\n store_cv_values : boolean, default=False\n Flag indicating if the cross-validation values corresponding to\n each alpha should be stored in the ``cv_values_`` attribute (see\n below). This flag is only compatible with ``cv=None`` (i.e. using\n Generalized Cross-Validation).\n\n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_alphas] or \\\n shape = [n_samples, n_targets, n_alphas], optional\n Cross-validation values for each alpha (if ``store_cv_values=True``\\\n and ``cv=None``). After ``fit()`` has been called, this attribute \\\n will contain the mean squared errors (by default) or the values \\\n of the ``{loss,score}_func`` function (if provided in the constructor).\n\n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. 
Set to 0.0 if\n ``fit_intercept = False``.\n\n alpha_ : float\n Estimated regularization parameter.\n\n Examples\n --------\n >>> from sklearn.datasets import load_diabetes\n >>> from sklearn.linear_model import RidgeCV\n >>> X, y = load_diabetes(return_X_y=True)\n >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)\n >>> clf.score(X, y) # doctest: +ELLIPSIS\n 0.5166...\n\n See also\n --------\n Ridge : Ridge regression\n RidgeClassifier : Ridge classifier\n RidgeClassifierCV : Ridge classifier with built-in cross validation\n \"\"\"\n pass\n\n\nclass RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n \"\"\"Ridge classifier with built-in cross-validation.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n By default, it performs Generalized Cross-Validation, which is a form of\n efficient Leave-One-Out cross-validation. Currently, only the n_features >\n n_samples case is handled efficiently.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alphas : numpy array of shape [n_alphas]\n Array of alpha values to try.\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the efficient Leave-One-Out cross-validation\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n class_weight : dict or 'balanced', optional\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n store_cv_values : boolean, default=False\n Flag indicating if the cross-validation values corresponding to\n each alpha should be stored in the ``cv_values_`` attribute (see\n below). This flag is only compatible with ``cv=None`` (i.e. using\n Generalized Cross-Validation).\n\n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_targets, n_alphas], optional\n Cross-validation values for each alpha (if ``store_cv_values=True`` and\n ``cv=None``). 
After ``fit()`` has been called, this attribute will\n contain the mean squared errors (by default) or the values of the\n ``{loss,score}_func`` function (if provided in the constructor).\n\n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n alpha_ : float\n Estimated regularization parameter\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.linear_model import RidgeClassifierCV\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)\n >>> clf.score(X, y) # doctest: +ELLIPSIS\n 0.9630...\n\n See also\n --------\n Ridge : Ridge regression\n RidgeClassifier : Ridge classifier\n RidgeCV : Ridge regression with built-in cross validation\n\n Notes\n -----\n For multi-class classification, n_class classifiers are trained in\n a one-versus-all approach. Concretely, this is implemented by taking\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n normalize=False, scoring=None, cv=None, class_weight=None,\n store_cv_values=False):\n super().__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the ridge classifier.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : array-like, shape (n_samples,)\n Target values. 
Will be cast to X's dtype if necessary\n\n sample_weight : float or numpy array of shape (n_samples,)\n Sample weight.\n\n Returns\n -------\n self : object\n \"\"\"\n check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],\n multi_output=True)\n\n self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)\n Y = self._label_binarizer.fit_transform(y)\n if not self._label_binarizer.y_type_.startswith('multilabel'):\n y = column_or_1d(y, warn=True)\n\n if self.class_weight:\n if sample_weight is None:\n sample_weight = 1.\n # modify the sample weights with the corresponding class weight\n sample_weight = (sample_weight *\n compute_sample_weight(self.class_weight, y))\n\n _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)\n return self\n\n @property\n def classes_(self):\n return self._label_binarizer.classes_\n", "from itertools import chain, product\nimport warnings\n\nimport pytest\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.utils.testing import (assert_equal, assert_raises,\n assert_array_equal,\n SkipTest, assert_raises_regex,\n assert_warns_message, assert_no_warnings)\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils import deprecated\nfrom sklearn.utils import resample\nfrom sklearn.utils import safe_mask\nfrom sklearn.utils import column_or_1d\nfrom sklearn.utils import safe_indexing\nfrom sklearn.utils import shuffle\nfrom sklearn.utils import gen_even_slices\nfrom sklearn.utils import get_chunk_n_rows\nfrom sklearn.utils import is_scalar_nan\nfrom sklearn.utils.mocking import MockDataFrame\nfrom sklearn import config_context\n\n\ndef test_make_rng():\n # Check the check_random_state utility function behavior\n assert check_random_state(None) is np.random.mtrand._rand\n assert check_random_state(np.random) is np.random.mtrand._rand\n\n rng_42 = np.random.RandomState(42)\n assert check_random_state(42).randint(100) == rng_42.randint(100)\n\n rng_42 = np.random.RandomState(42)\n assert check_random_state(rng_42) is rng_42\n\n rng_42 = np.random.RandomState(42)\n assert check_random_state(43).randint(100) != rng_42.randint(100)\n\n assert_raises(ValueError, check_random_state, \"some invalid seed\")\n\n\ndef test_deprecated():\n # Test whether the deprecated decorator issues appropriate warnings\n # Copied almost verbatim from https://docs.python.org/library/warnings.html\n\n # First a function...\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n\n @deprecated()\n def ham():\n return \"spam\"\n\n spam = ham()\n\n assert_equal(spam, \"spam\") # function must remain usable\n\n assert_equal(len(w), 1)\n assert issubclass(w[0].category, DeprecationWarning)\n assert \"deprecated\" in str(w[0].message).lower()\n\n # ... 
then a class.\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n\n @deprecated(\"don't use this\")\n class Ham(object):\n SPAM = 1\n\n ham = Ham()\n\n assert hasattr(ham, \"SPAM\")\n\n assert_equal(len(w), 1)\n assert issubclass(w[0].category, DeprecationWarning)\n assert \"deprecated\" in str(w[0].message).lower()\n\n\ndef test_resample():\n # Border case not worth mentioning in doctests\n assert resample() is None\n\n # Check that invalid arguments yield ValueError\n assert_raises(ValueError, resample, [0], [0, 1])\n assert_raises(ValueError, resample, [0, 1], [0, 1],\n replace=False, n_samples=3)\n assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)\n # Issue:6581, n_samples can be more when replace is True (default).\n assert_equal(len(resample([1, 2], n_samples=5)), 5)\n\n\ndef test_safe_mask():\n random_state = check_random_state(0)\n X = random_state.rand(5, 4)\n X_csr = sp.csr_matrix(X)\n mask = [False, False, True, True, True]\n\n mask = safe_mask(X, mask)\n assert_equal(X[mask].shape[0], 3)\n\n mask = safe_mask(X_csr, mask)\n assert_equal(X_csr[mask].shape[0], 3)\n\n\ndef test_column_or_1d():\n EXAMPLES = [\n (\"binary\", [\"spam\", \"egg\", \"spam\"]),\n (\"binary\", [0, 1, 0, 1]),\n (\"continuous\", np.arange(10) / 20.),\n (\"multiclass\", [1, 2, 3]),\n (\"multiclass\", [0, 1, 2, 2, 0]),\n (\"multiclass\", [[1], [2], [3]]),\n (\"multilabel-indicator\", [[0, 1, 0], [0, 0, 1]]),\n (\"multiclass-multioutput\", [[1, 2, 3]]),\n (\"multiclass-multioutput\", [[1, 1], [2, 2], [3, 1]]),\n (\"multiclass-multioutput\", [[5, 1], [4, 2], [3, 1]]),\n (\"multiclass-multioutput\", [[1, 2, 3]]),\n (\"continuous-multioutput\", np.arange(30).reshape((-1, 3))),\n ]\n\n for y_type, y in EXAMPLES:\n if y_type in [\"binary\", 'multiclass', \"continuous\"]:\n assert_array_equal(column_or_1d(y), np.ravel(y))\n else:\n assert_raises(ValueError, column_or_1d, y)\n\n\ndef test_safe_indexing():\n X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n inds = np.array([1, 2])\n X_inds = safe_indexing(X, inds)\n X_arrays = safe_indexing(np.array(X), inds)\n assert_array_equal(np.array(X_inds), X_arrays)\n assert_array_equal(np.array(X_inds), np.array(X)[inds])\n\n\ndef test_safe_indexing_pandas():\n try:\n import pandas as pd\n except ImportError:\n raise SkipTest(\"Pandas not found\")\n X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n X_df = pd.DataFrame(X)\n inds = np.array([1, 2])\n X_df_indexed = safe_indexing(X_df, inds)\n X_indexed = safe_indexing(X_df, inds)\n assert_array_equal(np.array(X_df_indexed), X_indexed)\n # fun with read-only data in dataframes\n # this happens in joblib memmapping\n X.setflags(write=False)\n X_df_readonly = pd.DataFrame(X)\n inds_readonly = inds.copy()\n inds_readonly.setflags(write=False)\n\n for this_df, this_inds in product([X_df, X_df_readonly],\n [inds, inds_readonly]):\n with warnings.catch_warnings(record=True):\n X_df_indexed = safe_indexing(this_df, this_inds)\n\n assert_array_equal(np.array(X_df_indexed), X_indexed)\n\n\ndef test_safe_indexing_mock_pandas():\n X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n X_df = MockDataFrame(X)\n inds = np.array([1, 2])\n X_df_indexed = safe_indexing(X_df, inds)\n X_indexed = safe_indexing(X_df, inds)\n assert_array_equal(np.array(X_df_indexed), X_indexed)\n\n\ndef test_shuffle_on_ndim_equals_three():\n def to_tuple(A): # to make the inner arrays hashable\n return tuple(tuple(tuple(C) for C in B) for B in A)\n\n A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)\n 
S = set(to_tuple(A))\n shuffle(A) # shouldn't raise a ValueError for dim = 3\n assert_equal(set(to_tuple(A)), S)\n\n\ndef test_shuffle_dont_convert_to_array():\n # Check that shuffle does not try to convert to numpy arrays with float\n # dtypes can let any indexable datastructure pass-through.\n a = ['a', 'b', 'c']\n b = np.array(['a', 'b', 'c'], dtype=object)\n c = [1, 2, 3]\n d = MockDataFrame(np.array([['a', 0],\n ['b', 1],\n ['c', 2]],\n dtype=object))\n e = sp.csc_matrix(np.arange(6).reshape(3, 2))\n a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)\n\n assert_equal(a_s, ['c', 'b', 'a'])\n assert_equal(type(a_s), list)\n\n assert_array_equal(b_s, ['c', 'b', 'a'])\n assert_equal(b_s.dtype, object)\n\n assert_equal(c_s, [3, 2, 1])\n assert_equal(type(c_s), list)\n\n assert_array_equal(d_s, np.array([['c', 2],\n ['b', 1],\n ['a', 0]],\n dtype=object))\n assert_equal(type(d_s), MockDataFrame)\n\n assert_array_equal(e_s.toarray(), np.array([[4, 5],\n [2, 3],\n [0, 1]]))\n\n\ndef test_gen_even_slices():\n # check that gen_even_slices contains all samples\n some_range = range(10)\n joined_range = list(chain(*[some_range[slice] for slice in\n gen_even_slices(10, 3)]))\n assert_array_equal(some_range, joined_range)\n\n # check that passing negative n_chunks raises an error\n slices = gen_even_slices(10, -1)\n assert_raises_regex(ValueError, \"gen_even_slices got n_packs=-1, must be\"\n \" >=1\", next, slices)\n\n\[email protected](\n ('row_bytes', 'max_n_rows', 'working_memory', 'expected', 'warning'),\n [(1024, None, 1, 1024, None),\n (1024, None, 0.99999999, 1023, None),\n (1023, None, 1, 1025, None),\n (1025, None, 1, 1023, None),\n (1024, None, 2, 2048, None),\n (1024, 7, 1, 7, None),\n (1024 * 1024, None, 1, 1, None),\n (1024 * 1024 + 1, None, 1, 1,\n 'Could not adhere to working_memory config. 
'\n 'Currently 1MiB, 2MiB required.'),\n ])\ndef test_get_chunk_n_rows(row_bytes, max_n_rows, working_memory,\n expected, warning):\n if warning is not None:\n def check_warning(*args, **kw):\n return assert_warns_message(UserWarning, warning, *args, **kw)\n else:\n check_warning = assert_no_warnings\n\n actual = check_warning(get_chunk_n_rows,\n row_bytes=row_bytes,\n max_n_rows=max_n_rows,\n working_memory=working_memory)\n\n assert actual == expected\n assert type(actual) is type(expected)\n with config_context(working_memory=working_memory):\n actual = check_warning(get_chunk_n_rows,\n row_bytes=row_bytes,\n max_n_rows=max_n_rows)\n assert actual == expected\n assert type(actual) is type(expected)\n\n\[email protected](\"value, result\", [(float(\"nan\"), True),\n (np.nan, True),\n (np.float(\"nan\"), True),\n (np.float32(\"nan\"), True),\n (np.float64(\"nan\"), True),\n (0, False),\n (0., False),\n (None, False),\n (\"\", False),\n (\"nan\", False),\n ([np.nan], False)])\ndef test_is_scalar_nan(value, result):\n assert is_scalar_nan(value) is result\n\n\ndef dummy_func():\n pass\n\n\ndef test_deprecation_joblib_api(tmpdir):\n def check_warning(*args, **kw):\n return assert_warns_message(\n DeprecationWarning, \"deprecated in version 0.20.1\", *args, **kw)\n\n # Ensure that the joblib API is deprecated in sklearn.util\n from sklearn.utils import Parallel, Memory, delayed\n from sklearn.utils import cpu_count, hash, effective_n_jobs\n check_warning(Memory, str(tmpdir))\n check_warning(hash, 1)\n check_warning(Parallel)\n check_warning(cpu_count)\n check_warning(effective_n_jobs, 1)\n check_warning(delayed, dummy_func)\n\n # Only parallel_backend and register_parallel_backend are not deprecated in\n # sklearn.utils\n from sklearn.utils import parallel_backend, register_parallel_backend\n assert_no_warnings(parallel_backend, 'loky', None)\n assert_no_warnings(register_parallel_backend, 'failing', None)\n\n # Ensure that the deprecation have no side effect in sklearn.utils._joblib\n from sklearn.utils._joblib import Parallel, Memory, delayed\n from sklearn.utils._joblib import cpu_count, hash, effective_n_jobs\n from sklearn.utils._joblib import parallel_backend\n from sklearn.utils._joblib import register_parallel_backend\n assert_no_warnings(Memory, str(tmpdir))\n assert_no_warnings(hash, 1)\n assert_no_warnings(Parallel)\n assert_no_warnings(cpu_count)\n assert_no_warnings(effective_n_jobs, 1)\n assert_no_warnings(delayed, dummy_func)\n assert_no_warnings(parallel_backend, 'loky', None)\n assert_no_warnings(register_parallel_backend, 'failing', None)\n\n from sklearn.utils._joblib import joblib\n del joblib.parallel.BACKENDS['failing']\n", "\"\"\"\nLeast Angle Regression algorithm. 
See the documentation on the\nGeneralized Linear Model for a complete discussion.\n\"\"\"\nfrom __future__ import print_function\n\n# Author: Fabian Pedregosa <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Gael Varoquaux\n#\n# License: BSD 3 clause\n\nfrom math import log\nimport sys\nimport warnings\n\nimport numpy as np\nfrom scipy import linalg, interpolate\nfrom scipy.linalg.lapack import get_lapack_funcs\n\nfrom .base import LinearModel\nfrom ..base import RegressorMixin\nfrom ..utils import arrayfuncs, as_float_array, check_X_y\nfrom ..model_selection import check_cv\nfrom ..exceptions import ConvergenceWarning\nfrom ..utils._joblib import Parallel, delayed\n\nsolve_triangular_args = {'check_finite': False}\n\n\ndef lars_path(X, y, Xy=None, Gram=None, max_iter=500,\n alpha_min=0, method='lar', copy_X=True,\n eps=np.finfo(np.float).eps,\n copy_Gram=True, verbose=0, return_path=True,\n return_n_iter=False, positive=False):\n \"\"\"Compute Least Angle Regression or Lasso path using LARS algorithm [1]\n\n The optimization objective for the case method='lasso' is::\n\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\n\n in the case of method='lars', the objective function is only known in\n the form of an implicit equation (see discussion in [1])\n\n Read more in the :ref:`User Guide <least_angle_regression>`.\n\n Parameters\n -----------\n X : array, shape: (n_samples, n_features)\n Input data.\n\n y : array, shape: (n_samples)\n Input targets.\n\n Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \\\n optional\n Xy = np.dot(X.T, y) that can be precomputed. It is useful\n only when the Gram matrix is precomputed.\n\n Gram : None, 'auto', array, shape: (n_features, n_features), optional\n Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram\n matrix is precomputed from the given X, if there are more samples\n than features.\n\n max_iter : integer, optional (default=500)\n Maximum number of iterations to perform, set to infinity for no limit.\n\n alpha_min : float, optional (default=0)\n Minimum correlation along the path. It corresponds to the\n regularization parameter alpha parameter in the Lasso.\n\n method : {'lar', 'lasso'}, optional (default='lar')\n Specifies the returned model. Select ``'lar'`` for Least Angle\n Regression, ``'lasso'`` for the Lasso.\n\n copy_X : bool, optional (default=True)\n If ``False``, ``X`` is overwritten.\n\n eps : float, optional (default=``np.finfo(np.float).eps``)\n The machine-precision regularization in the computation of the\n Cholesky diagonal factors. Increase this for very ill-conditioned\n systems.\n\n copy_Gram : bool, optional (default=True)\n If ``False``, ``Gram`` is overwritten.\n\n verbose : int (default=0)\n Controls output verbosity.\n\n return_path : bool, optional (default=True)\n If ``return_path==True`` returns the entire path, else returns only the\n last point of the path.\n\n return_n_iter : bool, optional (default=False)\n Whether to return the number of iterations.\n\n positive : boolean (default=False)\n Restrict coefficients to be >= 0.\n This option is only allowed with method 'lasso'. Note that the model\n coefficients will not converge to the ordinary-least-squares solution\n for small values of alpha. 
Only coefficients up to the smallest alpha\n value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by\n the stepwise Lars-Lasso algorithm are typically in congruence with the\n solution of the coordinate descent lasso_path function.\n\n Returns\n --------\n alphas : array, shape: [n_alphas + 1]\n Maximum of covariances (in absolute value) at each iteration.\n ``n_alphas`` is either ``max_iter``, ``n_features`` or the\n number of nodes in the path with ``alpha >= alpha_min``, whichever\n is smaller.\n\n active : array, shape [n_alphas]\n Indices of active variables at the end of the path.\n\n coefs : array, shape (n_features, n_alphas + 1)\n Coefficients along the path\n\n n_iter : int\n Number of iterations run. Returned only if return_n_iter is set\n to True.\n\n See also\n --------\n lasso_path\n LassoLars\n Lars\n LassoLarsCV\n LarsCV\n sklearn.decomposition.sparse_encode\n\n References\n ----------\n .. [1] \"Least Angle Regression\", Effron et al.\n https://statweb.stanford.edu/~tibs/ftp/lars.pdf\n\n .. [2] `Wikipedia entry on the Least-angle regression\n <https://en.wikipedia.org/wiki/Least-angle_regression>`_\n\n .. [3] `Wikipedia entry on the Lasso\n <https://en.wikipedia.org/wiki/Lasso_(statistics)>`_\n\n \"\"\"\n if method == 'lar' and positive:\n warnings.warn('positive option is broken for Least'\n ' Angle Regression (LAR). Use method=\"lasso\".'\n ' This option will be removed in version 0.22.',\n DeprecationWarning)\n\n n_features = X.shape[1]\n n_samples = y.size\n max_features = min(max_iter, n_features)\n\n if return_path:\n coefs = np.zeros((max_features + 1, n_features))\n alphas = np.zeros(max_features + 1)\n else:\n coef, prev_coef = np.zeros(n_features), np.zeros(n_features)\n alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?\n\n n_iter, n_active = 0, 0\n active, indices = list(), np.arange(n_features)\n # holds the sign of covariance\n sign_active = np.empty(max_features, dtype=np.int8)\n drop = False\n\n # will hold the cholesky factorization. Only lower part is\n # referenced.\n L = np.empty((max_features, max_features), dtype=X.dtype)\n swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))\n solve_cholesky, = get_lapack_funcs(('potrs',), (X,))\n\n if Gram is None or Gram is False:\n Gram = None\n if copy_X:\n # force copy. 
setting the array to be fortran-ordered\n # speeds up the calculation of the (partial) Gram matrix\n # and allows to easily swap columns\n X = X.copy('F')\n\n elif isinstance(Gram, str) and Gram == 'auto' or Gram is True:\n if Gram is True or X.shape[0] > X.shape[1]:\n Gram = np.dot(X.T, X)\n else:\n Gram = None\n elif copy_Gram:\n Gram = Gram.copy()\n\n if Xy is None:\n Cov = np.dot(X.T, y)\n else:\n Cov = Xy.copy()\n\n if verbose:\n if verbose > 1:\n print(\"Step\\t\\tAdded\\t\\tDropped\\t\\tActive set size\\t\\tC\")\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n\n tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning\n equality_tolerance = np.finfo(np.float32).eps\n\n while True:\n if Cov.size:\n if positive:\n C_idx = np.argmax(Cov)\n else:\n C_idx = np.argmax(np.abs(Cov))\n\n C_ = Cov[C_idx]\n\n if positive:\n C = C_\n else:\n C = np.fabs(C_)\n else:\n C = 0.\n\n if return_path:\n alpha = alphas[n_iter, np.newaxis]\n coef = coefs[n_iter]\n prev_alpha = alphas[n_iter - 1, np.newaxis]\n prev_coef = coefs[n_iter - 1]\n\n alpha[0] = C / n_samples\n if alpha[0] <= alpha_min + equality_tolerance: # early stopping\n if abs(alpha[0] - alpha_min) > equality_tolerance:\n # interpolation factor 0 <= ss < 1\n if n_iter > 0:\n # In the first iteration, all alphas are zero, the formula\n # below would make ss a NaN\n ss = ((prev_alpha[0] - alpha_min) /\n (prev_alpha[0] - alpha[0]))\n coef[:] = prev_coef + ss * (coef - prev_coef)\n alpha[0] = alpha_min\n if return_path:\n coefs[n_iter] = coef\n break\n\n if n_iter >= max_iter or n_active >= n_features:\n break\n\n if not drop:\n\n ##########################################################\n # Append x_j to the Cholesky factorization of (Xa * Xa') #\n # #\n # ( L 0 ) #\n # L -> ( ) , where L * w = Xa' x_j #\n # ( w z ) and z = ||x_j|| #\n # #\n ##########################################################\n\n if positive:\n sign_active[n_active] = np.ones_like(C_)\n else:\n sign_active[n_active] = np.sign(C_)\n m, n = n_active, C_idx + n_active\n\n Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])\n indices[n], indices[m] = indices[m], indices[n]\n Cov_not_shortened = Cov\n Cov = Cov[1:] # remove Cov[0]\n\n if Gram is None:\n X.T[n], X.T[m] = swap(X.T[n], X.T[m])\n c = nrm2(X.T[n_active]) ** 2\n L[n_active, :n_active] = \\\n np.dot(X.T[n_active], X.T[:n_active].T)\n else:\n # swap does only work inplace if matrix is fortran\n # contiguous ...\n Gram[m], Gram[n] = swap(Gram[m], Gram[n])\n Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])\n c = Gram[n_active, n_active]\n L[n_active, :n_active] = Gram[n_active, :n_active]\n\n # Update the cholesky decomposition for the Gram matrix\n if n_active:\n linalg.solve_triangular(L[:n_active, :n_active],\n L[n_active, :n_active],\n trans=0, lower=1,\n overwrite_b=True,\n **solve_triangular_args)\n\n v = np.dot(L[n_active, :n_active], L[n_active, :n_active])\n diag = max(np.sqrt(np.abs(c - v)), eps)\n L[n_active, n_active] = diag\n\n if diag < 1e-7:\n # The system is becoming too ill-conditioned.\n # We have degenerate vectors in our active set.\n # We'll 'drop for good' the last regressor added.\n\n # Note: this case is very rare. It is no longer triggered by\n # the test suite. The `equality_tolerance` margin added in 0.16\n # to get early stopping to work consistently on all versions of\n # Python including 32 bit Python under Windows seems to make it\n # very difficult to trigger the 'drop for good' strategy.\n warnings.warn('Regressors in active set degenerate. 
'\n 'Dropping a regressor, after %i iterations, '\n 'i.e. alpha=%.3e, '\n 'with an active set of %i regressors, and '\n 'the smallest cholesky pivot element being %.3e.'\n ' Reduce max_iter or increase eps parameters.'\n % (n_iter, alpha, n_active, diag),\n ConvergenceWarning)\n\n # XXX: need to figure a 'drop for good' way\n Cov = Cov_not_shortened\n Cov[0] = 0\n Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])\n continue\n\n active.append(indices[n_active])\n n_active += 1\n\n if verbose > 1:\n print(\"%s\\t\\t%s\\t\\t%s\\t\\t%s\\t\\t%s\" % (n_iter, active[-1], '',\n n_active, C))\n\n if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:\n # alpha is increasing. This is because the updates of Cov are\n # bringing in too much numerical error that is greater than\n # than the remaining correlation with the\n # regressors. Time to bail out\n warnings.warn('Early stopping the lars path, as the residues '\n 'are small and the current value of alpha is no '\n 'longer well controlled. %i iterations, alpha=%.3e, '\n 'previous alpha=%.3e, with an active set of %i '\n 'regressors.'\n % (n_iter, alpha, prev_alpha, n_active),\n ConvergenceWarning)\n break\n\n # least squares solution\n least_squares, info = solve_cholesky(L[:n_active, :n_active],\n sign_active[:n_active],\n lower=True)\n\n if least_squares.size == 1 and least_squares == 0:\n # This happens because sign_active[:n_active] = 0\n least_squares[...] = 1\n AA = 1.\n else:\n # is this really needed ?\n AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))\n\n if not np.isfinite(AA):\n # L is too ill-conditioned\n i = 0\n L_ = L[:n_active, :n_active].copy()\n while not np.isfinite(AA):\n L_.flat[::n_active + 1] += (2 ** i) * eps\n least_squares, info = solve_cholesky(\n L_, sign_active[:n_active], lower=True)\n tmp = max(np.sum(least_squares * sign_active[:n_active]),\n eps)\n AA = 1. 
/ np.sqrt(tmp)\n i += 1\n least_squares *= AA\n\n if Gram is None:\n # equiangular direction of variables in the active set\n eq_dir = np.dot(X.T[:n_active].T, least_squares)\n # correlation between each unactive variables and\n # eqiangular vector\n corr_eq_dir = np.dot(X.T[n_active:], eq_dir)\n else:\n # if huge number of features, this takes 50% of time, I\n # think could be avoided if we just update it using an\n # orthogonal (QR) decomposition of X\n corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,\n least_squares)\n\n g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32))\n if positive:\n gamma_ = min(g1, C / AA)\n else:\n g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32))\n gamma_ = min(g1, g2, C / AA)\n\n # TODO: better names for these variables: z\n drop = False\n z = -coef[active] / (least_squares + tiny32)\n z_pos = arrayfuncs.min_pos(z)\n if z_pos < gamma_:\n # some coefficients have changed sign\n idx = np.where(z == z_pos)[0][::-1]\n\n # update the sign, important for LAR\n sign_active[idx] = -sign_active[idx]\n\n if method == 'lasso':\n gamma_ = z_pos\n drop = True\n\n n_iter += 1\n\n if return_path:\n if n_iter >= coefs.shape[0]:\n del coef, alpha, prev_alpha, prev_coef\n # resize the coefs and alphas array\n add_features = 2 * max(1, (max_features - n_active))\n coefs = np.resize(coefs, (n_iter + add_features, n_features))\n coefs[-add_features:] = 0\n alphas = np.resize(alphas, n_iter + add_features)\n alphas[-add_features:] = 0\n coef = coefs[n_iter]\n prev_coef = coefs[n_iter - 1]\n else:\n # mimic the effect of incrementing n_iter on the array references\n prev_coef = coef\n prev_alpha[0] = alpha[0]\n coef = np.zeros_like(coef)\n\n coef[active] = prev_coef[active] + gamma_ * least_squares\n\n # update correlations\n Cov -= gamma_ * corr_eq_dir\n\n # See if any coefficient has changed sign\n if drop and method == 'lasso':\n\n # handle the case when idx is not length of 1\n [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in\n idx]\n\n n_active -= 1\n # handle the case when idx is not length of 1\n drop_idx = [active.pop(ii) for ii in idx]\n\n if Gram is None:\n # propagate dropped variable\n for ii in idx:\n for i in range(ii, n_active):\n X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])\n # yeah this is stupid\n indices[i], indices[i + 1] = indices[i + 1], indices[i]\n\n # TODO: this could be updated\n residual = y - np.dot(X[:, :n_active], coef[active])\n temp = np.dot(X.T[n_active], residual)\n\n Cov = np.r_[temp, Cov]\n else:\n for ii in idx:\n for i in range(ii, n_active):\n indices[i], indices[i + 1] = indices[i + 1], indices[i]\n Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])\n Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],\n Gram[:, i + 1])\n\n # Cov_n = Cov_j + x_j * X + increment(betas) TODO:\n # will this still work with multiple drops ?\n\n # recompute covariance. Probably could be done better\n # wrong as Xy is not swapped with the rest of variables\n\n # TODO: this could be updated\n residual = y - np.dot(X, coef)\n temp = np.dot(X.T[drop_idx], residual)\n Cov = np.r_[temp, Cov]\n\n sign_active = np.delete(sign_active, idx)\n sign_active = np.append(sign_active, 0.) 
# just to maintain size\n if verbose > 1:\n print(\"%s\\t\\t%s\\t\\t%s\\t\\t%s\\t\\t%s\" % (n_iter, '', drop_idx,\n n_active, abs(temp)))\n\n if return_path:\n # resize coefs in case of early stop\n alphas = alphas[:n_iter + 1]\n coefs = coefs[:n_iter + 1]\n\n if return_n_iter:\n return alphas, active, coefs.T, n_iter\n else:\n return alphas, active, coefs.T\n else:\n if return_n_iter:\n return alpha, active, coef, n_iter\n else:\n return alpha, active, coef\n\n\n###############################################################################\n# Estimator classes\n\nclass Lars(LinearModel, RegressorMixin):\n \"\"\"Least Angle Regression model a.k.a. LAR\n\n Read more in the :ref:`User Guide <least_angle_regression>`.\n\n Parameters\n ----------\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n verbose : boolean or integer, optional\n Sets the verbosity amount\n\n normalize : boolean, optional, default True\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n precompute : True | False | 'auto' | array-like\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. The Gram\n matrix can also be passed as argument.\n\n n_nonzero_coefs : int, optional\n Target number of non-zero coefficients. Use ``np.inf`` for no limit.\n\n eps : float, optional\n The machine-precision regularization in the computation of the\n Cholesky diagonal factors. Increase this for very ill-conditioned\n systems. Unlike the ``tol`` parameter in some iterative\n optimization-based algorithms, this parameter does not control\n the tolerance of the optimization.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n fit_path : boolean\n If True the full path is stored in the ``coef_path_`` attribute.\n If you compute the solution for a large problem or many targets,\n setting ``fit_path`` to ``False`` will lead to a speedup, especially\n with a small alpha.\n\n positive : boolean (default=False)\n Restrict coefficients to be >= 0. Be aware that you might want to\n remove fit_intercept which is set True by default.\n\n .. deprecated:: 0.20\n\n The option is broken and deprecated. It will be removed in v0.22.\n\n Attributes\n ----------\n alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays\n Maximum of covariances (in absolute value) at each iteration. \\\n ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \\\n whichever is smaller.\n\n active_ : list, length = n_alphas | list of n_targets such lists\n Indices of active variables at the end of the path.\n\n coef_path_ : array, shape (n_features, n_alphas + 1) \\\n | list of n_targets such arrays\n The varying values of the coefficients along the path. 
It is not\n present if the ``fit_path`` parameter is ``False``.\n\n coef_ : array, shape (n_features,) or (n_targets, n_features)\n Parameter vector (w in the formulation formula).\n\n intercept_ : float | array, shape (n_targets,)\n Independent term in decision function.\n\n n_iter_ : array-like or int\n The number of iterations taken by lars_path to find the\n grid of alphas for each target.\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> reg = linear_model.Lars(n_nonzero_coefs=1)\n >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])\n ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,\n n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',\n verbose=False)\n >>> print(reg.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n [ 0. -1.11...]\n\n See also\n --------\n lars_path, LarsCV\n sklearn.decomposition.sparse_encode\n\n \"\"\"\n method = 'lar'\n\n def __init__(self, fit_intercept=True, verbose=False, normalize=True,\n precompute='auto', n_nonzero_coefs=500,\n eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,\n positive=False):\n self.fit_intercept = fit_intercept\n self.verbose = verbose\n self.normalize = normalize\n self.precompute = precompute\n self.n_nonzero_coefs = n_nonzero_coefs\n self.positive = positive\n self.eps = eps\n self.copy_X = copy_X\n self.fit_path = fit_path\n\n @staticmethod\n def _get_gram(precompute, X, y):\n if (not hasattr(precompute, '__array__')) and (\n (precompute is True) or\n (precompute == 'auto' and X.shape[0] > X.shape[1]) or\n (precompute == 'auto' and y.shape[1] > 1)):\n precompute = np.dot(X.T, X)\n\n return precompute\n\n def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None):\n \"\"\"Auxiliary method to fit the model using X, y as training data\"\"\"\n n_features = X.shape[1]\n\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X)\n\n if y.ndim == 1:\n y = y[:, np.newaxis]\n\n n_targets = y.shape[1]\n\n Gram = self._get_gram(self.precompute, X, y)\n\n self.alphas_ = []\n self.n_iter_ = []\n self.coef_ = np.empty((n_targets, n_features))\n\n if fit_path:\n self.active_ = []\n self.coef_path_ = []\n for k in range(n_targets):\n this_Xy = None if Xy is None else Xy[:, k]\n alphas, active, coef_path, n_iter_ = lars_path(\n X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,\n copy_Gram=True, alpha_min=alpha, method=self.method,\n verbose=max(0, self.verbose - 1), max_iter=max_iter,\n eps=self.eps, return_path=True,\n return_n_iter=True, positive=self.positive)\n self.alphas_.append(alphas)\n self.active_.append(active)\n self.n_iter_.append(n_iter_)\n self.coef_path_.append(coef_path)\n self.coef_[k] = coef_path[:, -1]\n\n if n_targets == 1:\n self.alphas_, self.active_, self.coef_path_, self.coef_ = [\n a[0] for a in (self.alphas_, self.active_, self.coef_path_,\n self.coef_)]\n self.n_iter_ = self.n_iter_[0]\n else:\n for k in range(n_targets):\n this_Xy = None if Xy is None else Xy[:, k]\n alphas, _, self.coef_[k], n_iter_ = lars_path(\n X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,\n copy_Gram=True, alpha_min=alpha, method=self.method,\n verbose=max(0, self.verbose - 1), max_iter=max_iter,\n eps=self.eps, return_path=False, return_n_iter=True,\n positive=self.positive)\n self.alphas_.append(alphas)\n self.n_iter_.append(n_iter_)\n if n_targets == 1:\n self.alphas_ = self.alphas_[0]\n self.n_iter_ = self.n_iter_[0]\n\n self._set_intercept(X_offset, 
y_offset, X_scale)\n return self\n\n def fit(self, X, y, Xy=None):\n \"\"\"Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data.\n\n y : array-like, shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \\\n optional\n Xy = np.dot(X.T, y) that can be precomputed. It is useful\n only when the Gram matrix is precomputed.\n\n Returns\n -------\n self : object\n returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, y_numeric=True, multi_output=True)\n\n alpha = getattr(self, 'alpha', 0.)\n if hasattr(self, 'n_nonzero_coefs'):\n alpha = 0. # n_nonzero_coefs parametrization takes priority\n max_iter = self.n_nonzero_coefs\n else:\n max_iter = self.max_iter\n\n self._fit(X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path,\n Xy=Xy)\n\n return self\n\n\nclass LassoLars(Lars):\n \"\"\"Lasso model fit with Least Angle Regression a.k.a. Lars\n\n It is a Linear Model trained with an L1 prior as regularizer.\n\n The optimization objective for Lasso is::\n\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\n\n Read more in the :ref:`User Guide <least_angle_regression>`.\n\n Parameters\n ----------\n alpha : float\n Constant that multiplies the penalty term. Defaults to 1.0.\n ``alpha = 0`` is equivalent to an ordinary least square, solved\n by :class:`LinearRegression`. For numerical reasons, using\n ``alpha = 0`` with the LassoLars object is not advised and you\n should prefer the LinearRegression object.\n\n fit_intercept : boolean\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n verbose : boolean or integer, optional\n Sets the verbosity amount\n\n normalize : boolean, optional, default True\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n precompute : True | False | 'auto' | array-like\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. The Gram\n matrix can also be passed as argument.\n\n max_iter : integer, optional\n Maximum number of iterations to perform.\n\n eps : float, optional\n The machine-precision regularization in the computation of the\n Cholesky diagonal factors. Increase this for very ill-conditioned\n systems. Unlike the ``tol`` parameter in some iterative\n optimization-based algorithms, this parameter does not control\n the tolerance of the optimization.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n fit_path : boolean\n If ``True`` the full path is stored in the ``coef_path_`` attribute.\n If you compute the solution for a large problem or many targets,\n setting ``fit_path`` to ``False`` will lead to a speedup, especially\n with a small alpha.\n\n positive : boolean (default=False)\n Restrict coefficients to be >= 0. 
Be aware that you might want to\n remove fit_intercept which is set True by default.\n Under the positive restriction the model coefficients will not converge\n to the ordinary-least-squares solution for small values of alpha.\n Only coefficients up to the smallest alpha value (``alphas_[alphas_ >\n 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso\n algorithm are typically in congruence with the solution of the\n coordinate descent Lasso estimator.\n\n Attributes\n ----------\n alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays\n Maximum of covariances (in absolute value) at each iteration. \\\n ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \\\n nodes in the path with correlation greater than ``alpha``, whichever \\\n is smaller.\n\n active_ : list, length = n_alphas | list of n_targets such lists\n Indices of active variables at the end of the path.\n\n coef_path_ : array, shape (n_features, n_alphas + 1) or list\n If a list is passed it's expected to be one of n_targets such arrays.\n The varying values of the coefficients along the path. It is not\n present if the ``fit_path`` parameter is ``False``.\n\n coef_ : array, shape (n_features,) or (n_targets, n_features)\n Parameter vector (w in the formulation formula).\n\n intercept_ : float | array, shape (n_targets,)\n Independent term in decision function.\n\n n_iter_ : array-like or int.\n The number of iterations taken by lars_path to find the\n grid of alphas for each target.\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> reg = linear_model.LassoLars(alpha=0.01)\n >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])\n ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,\n fit_path=True, max_iter=500, normalize=True, positive=False,\n precompute='auto', verbose=False)\n >>> print(reg.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n [ 0. 
-0.963257...]\n\n See also\n --------\n lars_path\n lasso_path\n Lasso\n LassoCV\n LassoLarsCV\n LassoLarsIC\n sklearn.decomposition.sparse_encode\n\n \"\"\"\n method = 'lasso'\n\n def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,\n normalize=True, precompute='auto', max_iter=500,\n eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,\n positive=False):\n self.alpha = alpha\n self.fit_intercept = fit_intercept\n self.max_iter = max_iter\n self.verbose = verbose\n self.normalize = normalize\n self.positive = positive\n self.precompute = precompute\n self.copy_X = copy_X\n self.eps = eps\n self.fit_path = fit_path\n\n\n###############################################################################\n# Cross-validated estimator classes\n\ndef _check_copy_and_writeable(array, copy=False):\n if copy or not array.flags.writeable:\n return array.copy()\n return array\n\n\ndef _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,\n copy=True, method='lars', verbose=False,\n fit_intercept=True, normalize=True, max_iter=500,\n eps=np.finfo(np.float).eps, positive=False):\n \"\"\"Compute the residues on left-out data for a full LARS path\n\n Parameters\n -----------\n X_train : array, shape (n_samples, n_features)\n The data to fit the LARS on\n\n y_train : array, shape (n_samples)\n The target variable to fit LARS on\n\n X_test : array, shape (n_samples, n_features)\n The data to compute the residues on\n\n y_test : array, shape (n_samples)\n The target variable to compute the residues on\n\n Gram : None, 'auto', array, shape: (n_features, n_features), optional\n Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram\n matrix is precomputed from the given X, if there are more samples\n than features\n\n copy : boolean, optional\n Whether X_train, X_test, y_train and y_test should be copied;\n if False, they may be overwritten.\n\n method : 'lar' | 'lasso'\n Specifies the returned model. Select ``'lar'`` for Least Angle\n Regression, ``'lasso'`` for the Lasso.\n\n verbose : integer, optional\n Sets the amount of verbosity\n\n fit_intercept : boolean\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n positive : boolean (default=False)\n Restrict coefficients to be >= 0. Be aware that you might want to\n remove fit_intercept which is set True by default.\n See reservations for using this option in combination with method\n 'lasso' for expected small values of alpha in the doc of LassoLarsCV\n and LassoLarsIC.\n\n normalize : boolean, optional, default True\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n max_iter : integer, optional\n Maximum number of iterations to perform.\n\n eps : float, optional\n The machine-precision regularization in the computation of the\n Cholesky diagonal factors. Increase this for very ill-conditioned\n systems. 
Unlike the ``tol`` parameter in some iterative\n optimization-based algorithms, this parameter does not control\n the tolerance of the optimization.\n\n\n Returns\n --------\n alphas : array, shape (n_alphas,)\n Maximum of covariances (in absolute value) at each iteration.\n ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever\n is smaller.\n\n active : list\n Indices of active variables at the end of the path.\n\n coefs : array, shape (n_features, n_alphas)\n Coefficients along the path\n\n residues : array, shape (n_alphas, n_samples)\n Residues of the prediction on the test data\n \"\"\"\n X_train = _check_copy_and_writeable(X_train, copy)\n y_train = _check_copy_and_writeable(y_train, copy)\n X_test = _check_copy_and_writeable(X_test, copy)\n y_test = _check_copy_and_writeable(y_test, copy)\n\n if fit_intercept:\n X_mean = X_train.mean(axis=0)\n X_train -= X_mean\n X_test -= X_mean\n y_mean = y_train.mean(axis=0)\n y_train = as_float_array(y_train, copy=False)\n y_train -= y_mean\n y_test = as_float_array(y_test, copy=False)\n y_test -= y_mean\n\n if normalize:\n norms = np.sqrt(np.sum(X_train ** 2, axis=0))\n nonzeros = np.flatnonzero(norms)\n X_train[:, nonzeros] /= norms[nonzeros]\n\n alphas, active, coefs = lars_path(\n X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,\n method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,\n positive=positive)\n if normalize:\n coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]\n residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]\n return alphas, active, coefs, residues.T\n\n\nclass LarsCV(Lars):\n \"\"\"Cross-validated Least Angle Regression model.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n Read more in the :ref:`User Guide <least_angle_regression>`.\n\n Parameters\n ----------\n fit_intercept : boolean\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n verbose : boolean or integer, optional\n Sets the verbosity amount\n\n max_iter : integer, optional\n Maximum number of iterations to perform.\n\n normalize : boolean, optional, default True\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n precompute : True | False | 'auto' | array-like\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. The Gram matrix\n cannot be passed as argument since we will use only subsets of X.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. 
versionchanged:: 0.20\n ``cv`` default value if None will change from 3-fold to 5-fold\n in v0.22.\n\n max_n_alphas : integer, optional\n The maximum number of points on the path used to compute the\n residuals in the cross-validation\n\n n_jobs : int or None, optional (default=None)\n Number of CPUs to use during the cross validation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n eps : float, optional\n The machine-precision regularization in the computation of the\n Cholesky diagonal factors. Increase this for very ill-conditioned\n systems.\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n positive : boolean (default=False)\n Restrict coefficients to be >= 0. Be aware that you might want to\n remove fit_intercept which is set True by default.\n\n .. deprecated:: 0.20\n The option is broken and deprecated. It will be removed in v0.22.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,)\n parameter vector (w in the formulation formula)\n\n intercept_ : float\n independent term in decision function\n\n coef_path_ : array, shape (n_features, n_alphas)\n the varying values of the coefficients along the path\n\n alpha_ : float\n the estimated regularization parameter alpha\n\n alphas_ : array, shape (n_alphas,)\n the different values of alpha along the path\n\n cv_alphas_ : array, shape (n_cv_alphas,)\n all the values of alpha along the path for the different folds\n\n mse_path_ : array, shape (n_folds, n_cv_alphas)\n the mean square error on left-out for each fold along the path\n (alpha values given by ``cv_alphas``)\n\n n_iter_ : array-like or int\n the number of iterations run by Lars with the optimal alpha.\n\n Examples\n --------\n >>> from sklearn.linear_model import LarsCV\n >>> from sklearn.datasets import make_regression\n >>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0)\n >>> reg = LarsCV(cv=5).fit(X, y)\n >>> reg.score(X, y) # doctest: +ELLIPSIS\n 0.9996...\n >>> reg.alpha_\n 0.0254...\n >>> reg.predict(X[:1,])\n array([154.0842...])\n\n See also\n --------\n lars_path, LassoLars, LassoLarsCV\n \"\"\"\n\n method = 'lar'\n\n def __init__(self, fit_intercept=True, verbose=False, max_iter=500,\n normalize=True, precompute='auto', cv='warn',\n max_n_alphas=1000, n_jobs=None, eps=np.finfo(np.float).eps,\n copy_X=True, positive=False):\n self.max_iter = max_iter\n self.cv = cv\n self.max_n_alphas = max_n_alphas\n self.n_jobs = n_jobs\n super().__init__(fit_intercept=fit_intercept,\n verbose=verbose, normalize=normalize,\n precompute=precompute,\n n_nonzero_coefs=500,\n eps=eps, copy_X=copy_X, fit_path=True,\n positive=positive)\n\n def fit(self, X, y):\n \"\"\"Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data.\n\n y : array-like, shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : object\n returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, y_numeric=True)\n X = as_float_array(X, copy=self.copy_X)\n y = as_float_array(y, copy=self.copy_X)\n\n # init cross-validation generator\n cv = check_cv(self.cv, classifier=False)\n\n # As we use cross-validation, the Gram matrix is not precomputed here\n Gram = self.precompute\n if hasattr(Gram, '__array__'):\n warnings.warn(\"Parameter 'precompute' cannot be an array in \"\n \"%s. 
Automatically switch to 'auto' instead.\"\n % self.__class__.__name__)\n Gram = 'auto'\n\n cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(\n delayed(_lars_path_residues)(\n X[train], y[train], X[test], y[test], Gram=Gram, copy=False,\n method=self.method, verbose=max(0, self.verbose - 1),\n normalize=self.normalize, fit_intercept=self.fit_intercept,\n max_iter=self.max_iter, eps=self.eps, positive=self.positive)\n for train, test in cv.split(X, y))\n all_alphas = np.concatenate(list(zip(*cv_paths))[0])\n # Unique also sorts\n all_alphas = np.unique(all_alphas)\n # Take at most max_n_alphas values\n stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))\n all_alphas = all_alphas[::stride]\n\n mse_path = np.empty((len(all_alphas), len(cv_paths)))\n for index, (alphas, active, coefs, residues) in enumerate(cv_paths):\n alphas = alphas[::-1]\n residues = residues[::-1]\n if alphas[0] != 0:\n alphas = np.r_[0, alphas]\n residues = np.r_[residues[0, np.newaxis], residues]\n if alphas[-1] != all_alphas[-1]:\n alphas = np.r_[alphas, all_alphas[-1]]\n residues = np.r_[residues, residues[-1, np.newaxis]]\n this_residues = interpolate.interp1d(alphas,\n residues,\n axis=0)(all_alphas)\n this_residues **= 2\n mse_path[:, index] = np.mean(this_residues, axis=-1)\n\n mask = np.all(np.isfinite(mse_path), axis=-1)\n all_alphas = all_alphas[mask]\n mse_path = mse_path[mask]\n # Select the alpha that minimizes left-out error\n i_best_alpha = np.argmin(mse_path.mean(axis=-1))\n best_alpha = all_alphas[i_best_alpha]\n\n # Store our parameters\n self.alpha_ = best_alpha\n self.cv_alphas_ = all_alphas\n self.mse_path_ = mse_path\n\n # Now compute the full model\n # it will call a lasso internally when self if LassoLarsCV\n # as self.method == 'lasso'\n self._fit(X, y, max_iter=self.max_iter, alpha=best_alpha,\n Xy=None, fit_path=True)\n return self\n\n\nclass LassoLarsCV(LarsCV):\n \"\"\"Cross-validated Lasso, using the LARS algorithm.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n The optimization objective for Lasso is::\n\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\n\n Read more in the :ref:`User Guide <least_angle_regression>`.\n\n Parameters\n ----------\n fit_intercept : boolean\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n verbose : boolean or integer, optional\n Sets the verbosity amount\n\n max_iter : integer, optional\n Maximum number of iterations to perform.\n\n normalize : boolean, optional, default True\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n precompute : True | False | 'auto'\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. 
The Gram matrix\n cannot be passed as argument since we will use only subsets of X.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. versionchanged:: 0.20\n ``cv`` default value if None will change from 3-fold to 5-fold\n in v0.22.\n\n max_n_alphas : integer, optional\n The maximum number of points on the path used to compute the\n residuals in the cross-validation\n\n n_jobs : int or None, optional (default=None)\n Number of CPUs to use during the cross validation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n eps : float, optional\n The machine-precision regularization in the computation of the\n Cholesky diagonal factors. Increase this for very ill-conditioned\n systems.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n positive : boolean (default=False)\n Restrict coefficients to be >= 0. Be aware that you might want to\n remove fit_intercept which is set True by default.\n Under the positive restriction the model coefficients do not converge\n to the ordinary-least-squares solution for small values of alpha.\n Only coefficients up to the smallest alpha value (``alphas_[alphas_ >\n 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso\n algorithm are typically in congruence with the solution of the\n coordinate descent Lasso estimator.\n As a consequence using LassoLarsCV only makes sense for problems where\n a sparse solution is expected and/or reached.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,)\n parameter vector (w in the formulation formula)\n\n intercept_ : float\n independent term in decision function.\n\n coef_path_ : array, shape (n_features, n_alphas)\n the varying values of the coefficients along the path\n\n alpha_ : float\n the estimated regularization parameter alpha\n\n alphas_ : array, shape (n_alphas,)\n the different values of alpha along the path\n\n cv_alphas_ : array, shape (n_cv_alphas,)\n all the values of alpha along the path for the different folds\n\n mse_path_ : array, shape (n_folds, n_cv_alphas)\n the mean square error on left-out for each fold along the path\n (alpha values given by ``cv_alphas``)\n\n n_iter_ : array-like or int\n the number of iterations run by Lars with the optimal alpha.\n\n Examples\n --------\n >>> from sklearn.linear_model import LassoLarsCV\n >>> from sklearn.datasets import make_regression\n >>> X, y = make_regression(noise=4.0, random_state=0)\n >>> reg = LassoLarsCV(cv=5).fit(X, y)\n >>> reg.score(X, y) # doctest: +ELLIPSIS\n 0.9992...\n >>> reg.alpha_\n 0.0484...\n >>> reg.predict(X[:1,])\n array([-77.8723...])\n\n Notes\n -----\n\n The object solves the same problem as the LassoCV object. 
However,\n unlike the LassoCV, it find the relevant alphas values by itself.\n In general, because of this property, it will be more stable.\n However, it is more fragile to heavily multicollinear datasets.\n\n It is more efficient than the LassoCV if only a small number of\n features are selected compared to the total number, for instance if\n there are very few samples compared to the number of features.\n\n See also\n --------\n lars_path, LassoLars, LarsCV, LassoCV\n \"\"\"\n\n method = 'lasso'\n\n def __init__(self, fit_intercept=True, verbose=False, max_iter=500,\n normalize=True, precompute='auto', cv='warn',\n max_n_alphas=1000, n_jobs=None, eps=np.finfo(np.float).eps,\n copy_X=True, positive=False):\n self.fit_intercept = fit_intercept\n self.verbose = verbose\n self.max_iter = max_iter\n self.normalize = normalize\n self.precompute = precompute\n self.cv = cv\n self.max_n_alphas = max_n_alphas\n self.n_jobs = n_jobs\n self.eps = eps\n self.copy_X = copy_X\n self.positive = positive\n # XXX : we don't use super().__init__\n # to avoid setting n_nonzero_coefs\n\n\nclass LassoLarsIC(LassoLars):\n \"\"\"Lasso model fit with Lars using BIC or AIC for model selection\n\n The optimization objective for Lasso is::\n\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\n\n AIC is the Akaike information criterion and BIC is the Bayes\n Information criterion. Such criteria are useful to select the value\n of the regularization parameter by making a trade-off between the\n goodness of fit and the complexity of the model. A good model should\n explain well the data while being simple.\n\n Read more in the :ref:`User Guide <least_angle_regression>`.\n\n Parameters\n ----------\n criterion : 'bic' | 'aic'\n The type of criterion to use.\n\n fit_intercept : boolean\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n verbose : boolean or integer, optional\n Sets the verbosity amount\n\n normalize : boolean, optional, default True\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n precompute : True | False | 'auto' | array-like\n Whether to use a precomputed Gram matrix to speed up\n calculations. If set to ``'auto'`` let us decide. The Gram\n matrix can also be passed as argument.\n\n max_iter : integer, optional\n Maximum number of iterations to perform. Can be used for\n early stopping.\n\n eps : float, optional\n The machine-precision regularization in the computation of the\n Cholesky diagonal factors. Increase this for very ill-conditioned\n systems. Unlike the ``tol`` parameter in some iterative\n optimization-based algorithms, this parameter does not control\n the tolerance of the optimization.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n positive : boolean (default=False)\n Restrict coefficients to be >= 0. 
Be aware that you might want to\n remove fit_intercept which is set True by default.\n Under the positive restriction the model coefficients do not converge\n to the ordinary-least-squares solution for small values of alpha.\n Only coefficients up to the smallest alpha value (``alphas_[alphas_ >\n 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso\n algorithm are typically in congruence with the solution of the\n coordinate descent Lasso estimator.\n As a consequence using LassoLarsIC only makes sense for problems where\n a sparse solution is expected and/or reached.\n\n\n Attributes\n ----------\n coef_ : array, shape (n_features,)\n parameter vector (w in the formulation formula)\n\n intercept_ : float\n independent term in decision function.\n\n alpha_ : float\n the alpha parameter chosen by the information criterion\n\n n_iter_ : int\n number of iterations run by lars_path to find the grid of\n alphas.\n\n criterion_ : array, shape (n_alphas,)\n The value of the information criteria ('aic', 'bic') across all\n alphas. The alpha which has the smallest information criterion is\n chosen. This value is larger by a factor of ``n_samples`` compared to\n Eqns. 2.15 and 2.16 in (Zou et al, 2007).\n\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> reg = linear_model.LassoLarsIC(criterion='bic')\n >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])\n ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,\n max_iter=500, normalize=True, positive=False, precompute='auto',\n verbose=False)\n >>> print(reg.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n [ 0. -1.11...]\n\n Notes\n -----\n The estimation of the number of degrees of freedom is given by:\n\n \"On the degrees of freedom of the lasso\"\n Hui Zou, Trevor Hastie, and Robert Tibshirani\n Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.\n\n https://en.wikipedia.org/wiki/Akaike_information_criterion\n https://en.wikipedia.org/wiki/Bayesian_information_criterion\n\n See also\n --------\n lars_path, LassoLars, LassoLarsCV\n \"\"\"\n def __init__(self, criterion='aic', fit_intercept=True, verbose=False,\n normalize=True, precompute='auto', max_iter=500,\n eps=np.finfo(np.float).eps, copy_X=True, positive=False):\n self.criterion = criterion\n self.fit_intercept = fit_intercept\n self.positive = positive\n self.max_iter = max_iter\n self.verbose = verbose\n self.normalize = normalize\n self.copy_X = copy_X\n self.precompute = precompute\n self.eps = eps\n self.fit_path = True\n\n def fit(self, X, y, copy_X=True):\n \"\"\"Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n training data.\n\n y : array-like, shape (n_samples,)\n target values. 
Will be cast to X's dtype if necessary\n\n copy_X : boolean, optional, default True\n If ``True``, X will be copied; else, it may be overwritten.\n\n Returns\n -------\n self : object\n returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, y_numeric=True)\n\n X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X)\n max_iter = self.max_iter\n\n Gram = self.precompute\n\n alphas_, active_, coef_path_, self.n_iter_ = lars_path(\n X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,\n method='lasso', verbose=self.verbose, max_iter=max_iter,\n eps=self.eps, return_n_iter=True, positive=self.positive)\n\n n_samples = X.shape[0]\n\n if self.criterion == 'aic':\n K = 2 # AIC\n elif self.criterion == 'bic':\n K = log(n_samples) # BIC\n else:\n raise ValueError('criterion should be either bic or aic')\n\n R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals\n mean_squared_error = np.mean(R ** 2, axis=0)\n sigma2 = np.var(y)\n\n df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom\n for k, coef in enumerate(coef_path_.T):\n mask = np.abs(coef) > np.finfo(coef.dtype).eps\n if not np.any(mask):\n continue\n # get the number of degrees of freedom equal to:\n # Xc = X[:, mask]\n # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs\n df[k] = np.sum(mask)\n\n self.alphas_ = alphas_\n eps64 = np.finfo('float64').eps\n self.criterion_ = (n_samples * mean_squared_error / (sigma2 + eps64) +\n K * df) # Eqns. 2.15--16 in (Zou et al, 2007)\n n_best = np.argmin(self.criterion_)\n\n self.alpha_ = alphas_[n_best]\n self.coef_ = coef_path_[:, n_best]\n self._set_intercept(Xmean, ymean, Xstd)\n return self\n", "import os\nimport shutil\nimport tempfile\nimport warnings\nimport numpy\nfrom pickle import loads\nfrom pickle import dumps\nfrom functools import partial\n\nimport pytest\n\nfrom sklearn.datasets import get_data_home\nfrom sklearn.datasets import clear_data_home\nfrom sklearn.datasets import load_files\nfrom sklearn.datasets import load_sample_images\nfrom sklearn.datasets import load_sample_image\nfrom sklearn.datasets import load_digits\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.datasets import load_linnerud\nfrom sklearn.datasets import load_iris\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.datasets import load_boston\nfrom sklearn.datasets import load_wine\nfrom sklearn.datasets.base import Bunch\nfrom sklearn.datasets.tests.test_common import check_return_X_y\n\nfrom sklearn.externals._pilutil import pillow_installed\n\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_raises\n\n\ndef _remove_dir(path):\n if os.path.isdir(path):\n shutil.rmtree(path)\n\n\[email protected](scope=\"module\")\ndef data_home(tmpdir_factory):\n tmp_file = str(tmpdir_factory.mktemp(\"scikit_learn_data_home_test\"))\n yield tmp_file\n _remove_dir(tmp_file)\n\n\[email protected](scope=\"module\")\ndef load_files_root(tmpdir_factory):\n tmp_file = str(tmpdir_factory.mktemp(\"scikit_learn_load_files_test\"))\n yield tmp_file\n _remove_dir(tmp_file)\n\n\[email protected]\ndef test_category_dir_1(load_files_root):\n test_category_dir1 = tempfile.mkdtemp(dir=load_files_root)\n sample_file = tempfile.NamedTemporaryFile(dir=test_category_dir1,\n delete=False)\n sample_file.write(b\"Hello World!\\n\")\n sample_file.close()\n yield str(test_category_dir1)\n _remove_dir(test_category_dir1)\n\n\[email protected]\ndef test_category_dir_2(load_files_root):\n 
test_category_dir2 = tempfile.mkdtemp(dir=load_files_root)\n yield str(test_category_dir2)\n _remove_dir(test_category_dir2)\n\n\ndef test_data_home(data_home):\n # get_data_home will point to a pre-existing folder\n data_home = get_data_home(data_home=data_home)\n assert_equal(data_home, data_home)\n assert os.path.exists(data_home)\n\n # clear_data_home will delete both the content and the folder it-self\n clear_data_home(data_home=data_home)\n assert not os.path.exists(data_home)\n\n # if the folder is missing it will be created again\n data_home = get_data_home(data_home=data_home)\n assert os.path.exists(data_home)\n\n\ndef test_default_empty_load_files(load_files_root):\n res = load_files(load_files_root)\n assert_equal(len(res.filenames), 0)\n assert_equal(len(res.target_names), 0)\n assert_equal(res.DESCR, None)\n\n\ndef test_default_load_files(test_category_dir_1, test_category_dir_2,\n load_files_root):\n res = load_files(load_files_root)\n assert_equal(len(res.filenames), 1)\n assert_equal(len(res.target_names), 2)\n assert_equal(res.DESCR, None)\n assert_equal(res.data, [b\"Hello World!\\n\"])\n\n\ndef test_load_files_w_categories_desc_and_encoding(\n test_category_dir_1, test_category_dir_2, load_files_root):\n category = os.path.abspath(test_category_dir_1).split('/').pop()\n res = load_files(load_files_root, description=\"test\",\n categories=category, encoding=\"utf-8\")\n assert_equal(len(res.filenames), 1)\n assert_equal(len(res.target_names), 1)\n assert_equal(res.DESCR, \"test\")\n assert_equal(res.data, [\"Hello World!\\n\"])\n\n\ndef test_load_files_wo_load_content(\n test_category_dir_1, test_category_dir_2, load_files_root):\n res = load_files(load_files_root, load_content=False)\n assert_equal(len(res.filenames), 1)\n assert_equal(len(res.target_names), 2)\n assert_equal(res.DESCR, None)\n assert_equal(res.get('data'), None)\n\n\ndef test_load_sample_images():\n try:\n res = load_sample_images()\n assert_equal(len(res.images), 2)\n assert_equal(len(res.filenames), 2)\n assert res.DESCR\n except ImportError:\n warnings.warn(\"Could not load sample images, PIL is not available.\")\n\n\ndef test_load_digits():\n digits = load_digits()\n assert_equal(digits.data.shape, (1797, 64))\n assert_equal(numpy.unique(digits.target).size, 10)\n\n # test return_X_y option\n check_return_X_y(digits, partial(load_digits))\n\n\ndef test_load_digits_n_class_lt_10():\n digits = load_digits(9)\n assert_equal(digits.data.shape, (1617, 64))\n assert_equal(numpy.unique(digits.target).size, 9)\n\n\ndef test_load_sample_image():\n try:\n china = load_sample_image('china.jpg')\n assert_equal(china.dtype, 'uint8')\n assert_equal(china.shape, (427, 640, 3))\n except ImportError:\n warnings.warn(\"Could not load sample images, PIL is not available.\")\n\n\ndef test_load_missing_sample_image_error():\n if pillow_installed:\n assert_raises(AttributeError, load_sample_image,\n 'blop.jpg')\n else:\n warnings.warn(\"Could not load sample images, PIL is not available.\")\n\n\ndef test_load_diabetes():\n res = load_diabetes()\n assert_equal(res.data.shape, (442, 10))\n assert res.target.size, 442\n assert_equal(len(res.feature_names), 10)\n assert res.DESCR\n\n # test return_X_y option\n check_return_X_y(res, partial(load_diabetes))\n\n\ndef test_load_linnerud():\n res = load_linnerud()\n assert_equal(res.data.shape, (20, 3))\n assert_equal(res.target.shape, (20, 3))\n assert_equal(len(res.target_names), 3)\n assert res.DESCR\n assert os.path.exists(res.data_filename)\n assert 
os.path.exists(res.target_filename)\n\n # test return_X_y option\n check_return_X_y(res, partial(load_linnerud))\n\n\ndef test_load_iris():\n res = load_iris()\n assert_equal(res.data.shape, (150, 4))\n assert_equal(res.target.size, 150)\n assert_equal(res.target_names.size, 3)\n assert res.DESCR\n assert os.path.exists(res.filename)\n\n # test return_X_y option\n check_return_X_y(res, partial(load_iris))\n\n\ndef test_load_wine():\n res = load_wine()\n assert_equal(res.data.shape, (178, 13))\n assert_equal(res.target.size, 178)\n assert_equal(res.target_names.size, 3)\n assert res.DESCR\n\n # test return_X_y option\n check_return_X_y(res, partial(load_wine))\n\n\ndef test_load_breast_cancer():\n res = load_breast_cancer()\n assert_equal(res.data.shape, (569, 30))\n assert_equal(res.target.size, 569)\n assert_equal(res.target_names.size, 2)\n assert res.DESCR\n assert os.path.exists(res.filename)\n\n # test return_X_y option\n check_return_X_y(res, partial(load_breast_cancer))\n\n\ndef test_load_boston():\n res = load_boston()\n assert_equal(res.data.shape, (506, 13))\n assert_equal(res.target.size, 506)\n assert_equal(res.feature_names.size, 13)\n assert res.DESCR\n assert os.path.exists(res.filename)\n\n # test return_X_y option\n check_return_X_y(res, partial(load_boston))\n\n\ndef test_loads_dumps_bunch():\n bunch = Bunch(x=\"x\")\n bunch_from_pkl = loads(dumps(bunch))\n bunch_from_pkl.x = \"y\"\n assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)\n\n\ndef test_bunch_pickle_generated_with_0_16_and_read_with_0_17():\n bunch = Bunch(key='original')\n # This reproduces a problem when Bunch pickles have been created\n # with scikit-learn 0.16 and are read with 0.17. Basically there\n # is a surprising behaviour because reading bunch.key uses\n # bunch.__dict__ (which is non empty for 0.16 Bunch objects)\n # whereas assigning into bunch.key uses bunch.__setattr__. 
See\n # https://github.com/scikit-learn/scikit-learn/issues/6196 for\n # more details\n bunch.__dict__['key'] = 'set from __dict__'\n bunch_from_pkl = loads(dumps(bunch))\n # After loading from pickle the __dict__ should have been ignored\n assert_equal(bunch_from_pkl.key, 'original')\n assert_equal(bunch_from_pkl['key'], 'original')\n # Making sure that changing the attr does change the value\n # associated with __getitem__ as well\n bunch_from_pkl.key = 'changed'\n assert_equal(bunch_from_pkl.key, 'changed')\n assert_equal(bunch_from_pkl['key'], 'changed')\n\n\ndef test_bunch_dir():\n # check that dir (important for autocomplete) shows attributes\n data = load_iris()\n assert \"data\" in dir(data)\n", "\"\"\"\nBase class for ensemble-based estimators.\n\"\"\"\n\n# Authors: Gilles Louppe\n# License: BSD 3 clause\n\nimport numpy as np\nimport numbers\n\nfrom ..base import clone\nfrom ..base import BaseEstimator\nfrom ..base import MetaEstimatorMixin\nfrom ..utils import check_random_state\nfrom ..utils._joblib import effective_n_jobs\nfrom abc import ABCMeta, abstractmethod\n\nMAX_RAND_SEED = np.iinfo(np.int32).max\n\n\ndef _set_random_states(estimator, random_state=None):\n \"\"\"Sets fixed random_state parameters for an estimator\n\n Finds all parameters ending ``random_state`` and sets them to integers\n derived from ``random_state``.\n\n Parameters\n ----------\n\n estimator : estimator supporting get/set_params\n Estimator with potential randomness managed by random_state\n parameters.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Notes\n -----\n This does not necessarily set *all* ``random_state`` attributes that\n control an estimator's randomness, only those accessible through\n ``estimator.get_params()``. ``random_state``s not controlled include\n those belonging to:\n\n * cross-validation splitters\n * ``scipy.stats`` rvs\n \"\"\"\n random_state = check_random_state(random_state)\n to_set = {}\n for key in sorted(estimator.get_params(deep=True)):\n if key == 'random_state' or key.endswith('__random_state'):\n to_set[key] = random_state.randint(MAX_RAND_SEED)\n\n if to_set:\n estimator.set_params(**to_set)\n\n\nclass BaseEnsemble(BaseEstimator, MetaEstimatorMixin, metaclass=ABCMeta):\n \"\"\"Base class for all ensemble classes.\n\n Warning: This class should not be used directly. Use derived classes\n instead.\n\n Parameters\n ----------\n base_estimator : object, optional (default=None)\n The base estimator from which the ensemble is built.\n\n n_estimators : integer\n The number of estimators in the ensemble.\n\n estimator_params : list of strings\n The list of attributes to use as parameters when instantiating a\n new base estimator. If none are given, default parameters are used.\n\n Attributes\n ----------\n base_estimator_ : estimator\n The base estimator from which the ensemble is grown.\n\n estimators_ : list of estimators\n The collection of fitted base estimators.\n \"\"\"\n\n @abstractmethod\n def __init__(self, base_estimator, n_estimators=10,\n estimator_params=tuple()):\n # Set parameters\n self.base_estimator = base_estimator\n self.n_estimators = n_estimators\n self.estimator_params = estimator_params\n\n # Don't instantiate estimators now! Parameters of base_estimator might\n # still change. 
Eg., when grid-searching with the nested object syntax.\n # self.estimators_ needs to be filled by the derived classes in fit.\n\n def _validate_estimator(self, default=None):\n \"\"\"Check the estimator and the n_estimator attribute, set the\n `base_estimator_` attribute.\"\"\"\n if not isinstance(self.n_estimators, (numbers.Integral, np.integer)):\n raise ValueError(\"n_estimators must be an integer, \"\n \"got {0}.\".format(type(self.n_estimators)))\n\n if self.n_estimators <= 0:\n raise ValueError(\"n_estimators must be greater than zero, \"\n \"got {0}.\".format(self.n_estimators))\n\n if self.base_estimator is not None:\n self.base_estimator_ = self.base_estimator\n else:\n self.base_estimator_ = default\n\n if self.base_estimator_ is None:\n raise ValueError(\"base_estimator cannot be None\")\n\n def _make_estimator(self, append=True, random_state=None):\n \"\"\"Make and configure a copy of the `base_estimator_` attribute.\n\n Warning: This method should be used to properly instantiate new\n sub-estimators.\n \"\"\"\n estimator = clone(self.base_estimator_)\n estimator.set_params(**dict((p, getattr(self, p))\n for p in self.estimator_params))\n\n if random_state is not None:\n _set_random_states(estimator, random_state)\n\n if append:\n self.estimators_.append(estimator)\n\n return estimator\n\n def __len__(self):\n \"\"\"Returns the number of estimators in the ensemble.\"\"\"\n return len(self.estimators_)\n\n def __getitem__(self, index):\n \"\"\"Returns the index'th estimator in the ensemble.\"\"\"\n return self.estimators_[index]\n\n def __iter__(self):\n \"\"\"Returns iterator over estimators in the ensemble.\"\"\"\n return iter(self.estimators_)\n\n\ndef _partition_estimators(n_estimators, n_jobs):\n \"\"\"Private function used to partition estimators between jobs.\"\"\"\n # Compute the number of jobs\n n_jobs = min(effective_n_jobs(n_jobs), n_estimators)\n\n # Partition estimators between jobs\n n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs,\n dtype=np.int)\n n_estimators_per_job[:n_estimators % n_jobs] += 1\n starts = np.cumsum(n_estimators_per_job)\n\n return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()\n" ]
[ [ "numpy.dot", "scipy.linalg.svd", "numpy.sqrt", "numpy.asarray", "numpy.any", "numpy.var", "scipy.sparse.linalg.lsqr", "numpy.ones_like", "scipy.sparse.issparse", "scipy.sparse.linalg.cg", "scipy.linalg.lstsq", "numpy.atleast_1d", "scipy.linalg.eigh", "scipy.sparse.linalg.aslinearoperator", "numpy.argmax", "numpy.outer", "numpy.repeat", "scipy.linalg.solve", "numpy.zeros", "scipy.sparse.linalg.LinearOperator", "numpy.ones", "numpy.shape", "numpy.empty" ], [ "sklearn.utils.testing.assert_raises_regex", "sklearn.utils.mocking.MockDataFrame", "sklearn.utils.testing.assert_raises", "pandas.DataFrame", "sklearn.utils.deprecated", "sklearn.utils.gen_even_slices", "sklearn.utils.testing.SkipTest", "numpy.arange", "sklearn.utils.testing.assert_no_warnings", "sklearn.utils.safe_mask", "sklearn.utils.testing.assert_warns_message", "sklearn.utils.column_or_1d", "numpy.float32", "numpy.ravel", "sklearn.utils.is_scalar_nan", "scipy.sparse.csr_matrix", "sklearn.utils.testing.assert_array_equal", "numpy.array", "numpy.random.RandomState", "sklearn.utils.testing.assert_equal", "sklearn.config_context", "sklearn.utils.shuffle", "numpy.float64", "sklearn.utils.safe_indexing", "sklearn.utils.resample", "sklearn.utils.check_random_state", "numpy.float" ], [ "numpy.dot", "scipy.linalg.get_blas_funcs", "numpy.resize", "numpy.sqrt", "numpy.fabs", "numpy.mean", "numpy.argmin", "numpy.zeros_like", "numpy.any", "numpy.var", "numpy.where", "scipy.linalg.solve_triangular", "numpy.ones_like", "numpy.unique", "numpy.arange", "numpy.finfo", "numpy.flatnonzero", "numpy.argmax", "scipy.interpolate.interp1d", "scipy.linalg.lapack.get_lapack_funcs", "numpy.zeros", "numpy.delete", "numpy.append", "numpy.array", "numpy.sum", "numpy.abs", "numpy.isfinite", "numpy.sign", "numpy.empty" ], [ "sklearn.utils.testing.assert_equal", "sklearn.datasets.load_breast_cancer", "sklearn.datasets.load_sample_images", "numpy.unique", "sklearn.datasets.load_linnerud", "sklearn.datasets.load_sample_image", "sklearn.datasets.load_wine", "sklearn.datasets.load_iris", "sklearn.datasets.load_diabetes", "sklearn.datasets.load_files", "sklearn.datasets.base.Bunch", "sklearn.datasets.get_data_home", "sklearn.utils.testing.assert_raises", "sklearn.datasets.load_digits", "sklearn.datasets.load_boston", "sklearn.datasets.clear_data_home" ], [ "numpy.cumsum", "numpy.iinfo", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
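The scikit-learn record above embeds least_angle.py, whose LassoLarsIC estimator picks alpha by minimizing n_samples * MSE / sigma2 + K * df with K = 2 for AIC and K = log(n_samples) for BIC. A minimal usage sketch of that selection follows, assuming only scikit-learn and its make_regression helper; the synthetic dataset is illustrative and not taken from the record.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LassoLarsIC

X, y = make_regression(n_samples=200, n_features=50, noise=4.0, random_state=0)

for criterion in ("aic", "bic"):
    reg = LassoLarsIC(criterion=criterion).fit(X, y)
    # alpha_ is the penalty that minimizes the information criterion;
    # criterion_ holds the criterion value for every alpha on the LARS path.
    n_nonzero = int(np.sum(np.abs(reg.coef_) > 0))
    print(criterion, reg.alpha_, n_nonzero)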
Easonyesheng/StereoCameraToolk
[ "9319b7f4e5ce36833de722a15e1074e82b8b4f84", "9319b7f4e5ce36833de722a15e1074e82b8b4f84" ]
[ "models/ModelUtil/util.py", "Code_V1/RecStereo.py" ]
[ "\"\"\"Utility \"\"\"\n\nimport numpy as np\nimport cv2\nimport os\nimport logging\n\ndef check_string_is_empty(string):\n \"\"\"name\n check string empty or not\n Args: \n\n Returns:\n\n \"\"\"\n if string == '':\n return True\n\n return False\n\ndef check_numpy_array(array):\n \"\"\"name\n check array empty or not\n Args: \n\n Returns:\n True - Exist\n \"\"\"\n try:\n array.all()\n except AttributeError:\n return False\n \n return True\n\ndef after_cv_imshow():\n \"\"\"name\n\n close all the show window if press 'esc'\n set after cv2.imshow()\n\n Args:\n\n Returns:\n\n \"\"\"\n k = cv2.waitKey(0)\n if k == 27:\n cv2.destroyAllWindows()\n\ndef save_img_with_prefix(img, path, name):\n \"\"\"name\n\n save as 'path/name.jpg'\n\n Args:\n\n Returns:\n\n \"\"\"\n cv2.imwrite(os.path.join(path,name+'.jpg'), img)\n\ndef img_show(img, name):\n \"\"\"\n \"\"\"\n cv2.startWindowThread()\n img = img / np.max(img)\n cv2.imshow(name, img)\n after_cv_imshow()\n\ndef test_dir_if_not_create(path):\n \"\"\"name\n\n save as 'path/name.jpg'\n\n Args:\n\n Returns:\n \"\"\"\n if os.path.isdir(path):\n return True\n else:\n print('Create New Folder:', path)\n os.makedirs(path)\n return True\n\ndef log_init(logfilename):\n \"\"\"name\n\n save as 'path/name.jpg'\n\n Args:\n\n Returns:\n \"\"\"\n # logging.basicConfig(filename=logfilename, level=logging.INFO)\n # logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n # filename=logfilename,\n # level=logging.DEBUG)\n\n logger = logging.getLogger() # 不加名称设置root logger\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s: - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n # 使用FileHandler输出到文件\n fh = logging.FileHandler(logfilename, 'w')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n\n # 使用StreamHandler输出到屏幕\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n\n # 添加两个Handler\n logger.addHandler(ch)\n logger.addHandler(fh)", "\"\"\"Rectify\"\"\"\n# [9.768250931346025e-09,-2.500754865249672e-07,-9.548449895853595e-04;-6.860072261347790e-07,-2.777662381107611e-07,0.106495821734710;0.001526302816024,-0.106009349648440,-1.225012201964717]\nfrom Main import SelfCalibration\nimport cv2\nimport numpy as np\nimport time\n\nImgPath = ''\nParaPath = ''\nSavePath = '/Users/zhangyesheng/Desktop/Research/GraduationDesign/StereoVision/StereoCamera/Res/Rect'\nSavePrefix = '1_1'\n\n\nRect = SelfCalibration(ImgPath,ParaPath,SavePath,SavePrefix)\n\n# load imgs\nleft_name = '/Users/zhangyesheng/Downloads/stereotest_1201/left/left0050.jpg'\nright_name = '/Users/zhangyesheng/Downloads/stereotest_1201/right/right0050.jpg'\nRect.load_image_pair(left_name, right_name)\n\n# load F\nF = np.array([3.859384665011900e-07,1.174750025819467e-05,0.001531730508105,2.221824335066730e-06,5.242290366570594e-07,0.273809471444167,-0.008896052858825,-0.280564089701346,3.309041896528640])\nF.resize((3,3))\nRect.FE = F\nRect.F = F\n# Rect.Kl = np.array([2.322705987395302e+03,0,0,-0.167747844550470,2.320091628959279e+03,0,7.307396133519883e+02,5.631600717557235e+02,1])\n# Rect.Kl.resize((3,3))\n# Rect.Kr = np.array([2.333911905221733e+03,0,0,-2.581942636556887,2.331132299259980e+03,0,7.523197258658565e+02,5.591791897938467e+02,1])\n# Rect.Kr.resize((3,3))\n# Rect.dl = np.array([-0.003088019278731,9.051004572184039e-04,-0.082022534117694,0.985042599989443,-12.842760788832772])\n# Rect.dl.resize((1,5))\n# Rect.dr = 
np.array([-0.003387135997683,-0.002453459880741,-0.058123299595020,-0.475182841732686,6.312811785143997])\n# Rect.dr.resize((1,5))\n# Rect.R_stereo = np.array([0.999787587893149,-5.630929053392975e-04,-0.020602476093372,4.378413846965960e-04,0.999981399817143,-0.006083445953518,0.020605518428805,0.006073133139285,0.999769238206574])\n# Rect.R_stereo.resize((3,3))\n# Rect.R_stereo.T\n\n# a1 = -2.467541498923884e+02\n# a2 = -2.532822135256471\n# a3 = 1.369568704851006\n# Rect.T_stereo = np.array([a1,a2,a3])\n# Rect.T_stereo.resize((3))\n\n\n# # get matching points\nstart = time.time()\nRect.ExactGoodMatch()\nend = time.time()\nprint('Matching points time: %f'%(end-start))\n# Rect.Show()\n# Rectify\n\nRect.RectifyImgUncalibrated()\n# Rect.RectifyImg()\n" ]
[ [ "numpy.max" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
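RecStereo.py in the StereoCameraToolk record hard-codes a fundamental matrix, extracts matches, and then calls RectifyImgUncalibrated(). Below is a sketch of the underlying OpenCV pipeline it relies on (matched points plus F fed to stereoRectifyUncalibrated); the synthetic two-camera setup is an assumption made only so the snippet runs without the repository's image pair.

import cv2
import numpy as np

# Synthetic scene: random 3D points seen by two slightly rotated/translated cameras.
rng = np.random.default_rng(0)
pts3d = rng.uniform(-1.0, 1.0, (60, 3)) + np.array([0.0, 0.0, 5.0])

K = np.array([[800.0, 0.0, 320.0],
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])
R, _ = cv2.Rodrigues(np.array([[0.0], [0.05], [0.0]]))      # small yaw between the views
P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = K @ np.hstack([R, np.array([[-0.2], [0.0], [0.01]])])

def project(P, X):
    Xh = np.hstack([X, np.ones((len(X), 1))])
    x = (P @ Xh.T).T
    return (x[:, :2] / x[:, 2:]).astype(np.float32)

pts_l, pts_r = project(P1, pts3d), project(P2, pts3d)

# Estimate F from correspondences, then compute the rectifying homographies;
# the repository instead loads F and the image pair from disk.
F, _ = cv2.findFundamentalMat(pts_l, pts_r, cv2.FM_8POINT)
ok, H1, H2 = cv2.stereoRectifyUncalibrated(pts_l, pts_r, F, (640, 480))
print(ok, H1.shape, H2.shape)   # H1/H2 would then be applied with cv2.warpPerspective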
DewMaple/opencv-learning
[ "51991a5b9badf24cda740c1377f6be30dea91e1d" ]
[ "lesson_4_transformation/lesson_4_affine_transformation.py" ]
[ "import cv2\nimport numpy as np\n\nfrom utils import find_image\n\nimage_path = find_image('girls_01.jpg')\nimg = cv2.imread(image_path)\nrows, cols, channel = img.shape\n\npts_src = np.float32([[50, 50], [200, 50], [50, 200]])\npts_dst = np.float32([[10, 100], [200, 80], [100, 650]])\n\nM = cv2.getAffineTransform(pts_src, pts_dst)\nres = cv2.warpAffine(img, M, (cols, rows))\ncv2.imshow('transformation by three points', res)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
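A quick check of what cv2.getAffineTransform in the lesson above returns: a 2x3 matrix M that maps each of the three source control points exactly onto its destination point. The point values are copied from the lesson; only the verification step is new.

import cv2
import numpy as np

pts_src = np.float32([[50, 50], [200, 50], [50, 200]])
pts_dst = np.float32([[10, 100], [200, 80], [100, 650]])
M = cv2.getAffineTransform(pts_src, pts_dst)        # shape (2, 3)

# Apply M to the source points in homogeneous form and compare.
ones = np.ones((3, 1), dtype=np.float32)
mapped = (M @ np.hstack([pts_src, ones]).T).T
print(np.allclose(mapped, pts_dst))                 # True: exact on the three control points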
wichtounet/frameworks
[ "e0cac9d4ffbbf0b1e9d2491eb70bf2c6154f313b" ]
[ "tf/experiment6.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n\nimport argparse\nimport gzip\nimport os\nimport sys\nimport time\n\nimport os\nimport math\nimport numpy\nfrom PIL import Image\n\nimport numpy\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nbatch_size = 128\nbatches = 10009\nnum_epochs = 5\nnum_classes = 1000\n\nFLAGS = None\n\nfrom urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nimport tarfile\nimport pickle\n\ndef data_type():\n return tf.float32\n\ndef get_batch():\n index = 0\n\n global current_index\n global training_images\n global training_labels\n\n B = numpy.zeros(shape=(batch_size, 256, 256, 3))\n L = numpy.zeros(shape=(batch_size))\n\n while index < batch_size:\n try:\n img = load_img(training_images[current_index])\n B[index] = img_to_array(img)\n B[index] /= 255\n\n L[index] = training_labels[current_index]\n\n index = index + 1\n current_index = current_index + 1\n except:\n print(\"Ignore image {}\".format(training_images[current_index]))\n current_index = current_index + 1\n\n return B, keras.utils.to_categorical(L, num_classes)\n\ndef main(_):\n global current_index\n global training_images\n global training_labels\n\n label_counter = 0\n\n training_images = []\n training_labels = []\n\n for subdir, dirs, files in os.walk('/data/datasets/imagenet_resized/train/'):\n for folder in dirs:\n for folder_subdir, folder_dirs, folder_files in os.walk(os.path.join(subdir, folder)):\n for file in folder_files:\n training_images.append(os.path.join(folder_subdir, file))\n training_labels.append(label_counter)\n\n label_counter = label_counter + 1\n\n nice_n = math.floor(len(training_images) / batch_size) * batch_size\n\n print(nice_n)\n print(len(training_images))\n print(len(training_labels))\n\n import random\n\n perm = list(range(len(training_images)))\n random.shuffle(perm)\n training_images = [training_images[index] for index in perm]\n training_labels = [training_labels[index] for index in perm]\n\n print(\"Data is ready...\")\n\n train_data_node = tf.placeholder(data_type(), shape=(batch_size, 256, 256, 3))\n train_labels_node = tf.placeholder(tf.int64, shape=(batch_size,1000))\n\n # Convolutional weights\n conv1_weights = tf.Variable(tf.truncated_normal([3, 3, 3, 16], stddev=0.1, dtype=data_type()))\n conv1_biases = tf.Variable(tf.zeros([16], dtype=data_type()))\n conv2_weights = tf.Variable(tf.truncated_normal([3, 3, 16, 16], stddev=0.1, dtype=data_type()))\n conv2_biases = tf.Variable(tf.zeros([16], dtype=data_type()))\n conv3_weights = tf.Variable(tf.truncated_normal([3, 3, 16, 32], stddev=0.1, dtype=data_type()))\n conv3_biases = tf.Variable(tf.zeros([32], dtype=data_type()))\n conv4_weights = tf.Variable(tf.truncated_normal([3, 3, 32, 32], stddev=0.1, dtype=data_type()))\n conv4_biases = tf.Variable(tf.zeros([32], dtype=data_type()))\n conv5_weights = tf.Variable(tf.truncated_normal([3, 3, 32, 32], stddev=0.1, dtype=data_type()))\n conv5_biases = tf.Variable(tf.zeros([32], dtype=data_type()))\n\n # Fully connected weights\n fc1_weights = tf.Variable(tf.truncated_normal([2048, 2048], stddev=0.1, dtype=data_type()))\n fc1_biases = tf.Variable(tf.constant(0.1, shape=[2048], dtype=data_type()))\n fc2_weights = tf.Variable(tf.truncated_normal([2048, 1000], stddev=0.1, dtype=data_type()))\n fc2_biases = 
tf.Variable(tf.constant(0.1, shape=[1000], dtype=data_type()))\n\n def model(data):\n # Conv 1\n conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Conv 2\n conv = tf.nn.conv2d(pool, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Conv 3\n conv = tf.nn.conv2d(pool, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv3_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Conv 4\n conv = tf.nn.conv2d(pool, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv4_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Conv 5\n conv = tf.nn.conv2d(pool, conv5_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv5_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Fully Connected\n reshape = tf.reshape(pool, [batch_size, 2048])\n hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)\n\n return tf.matmul(hidden, fc2_weights) + fc2_biases\n\n # Training computation: logits + cross-entropy loss.\n logits = model(train_data_node)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = train_labels_node))\n\n # Use simple momentum for the optimization.\n optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9).minimize(loss)\n\n acc_pred = tf.equal(tf.argmax(logits,1), tf.argmax(train_labels_node,1))\n accuracy = tf.reduce_mean(tf.cast(acc_pred, tf.float32))\n\n # Predictions for the current training minibatch.\n # train_prediction = tf.nn.softmax(logits)\n\n # Create a local session to run the training.\n with tf.Session() as sess:\n # Run all the initializers to prepare the trainable parameters.\n tf.global_variables_initializer().run(session = sess)\n print('Initialized!')\n\n for epoch in range(0, num_epochs):\n current_index = 0\n\n while current_index + batch_size < len(training_images):\n start_time = time.time()\n\n b, l = get_batch()\n\n feed_dict = {train_data_node: b, train_labels_node: l}\n\n # Run the optimizer to update weights.\n _, batch_loss, batch_accuracy = sess.run([optimizer, loss, accuracy], feed_dict=feed_dict)\n\n end_time = time.time()\n\n print('batch {}/{} loss: {} accuracy: {} duration: {}ms'.format(int(current_index / batch_size), int(nice_n / batch_size), batch_loss, batch_accuracy, 1000 * (end_time - start_time)), flush = True)\n\n print('epoch {}/{}'.format(epoch, num_epochs))\n\n # Finally print the result!\n\n current_index = 0\n acc = 0.0\n\n while current_index + batch_size < len(training_images):\n b, l = get_batch()\n\n feed_dict = {train_data_node: b, train_labels_node: l}\n [batch_accuracy] = sess.run([accuracy], feed_dict=feed_dict)\n print('Test batch accuracy:', batch_accuracy, flush = True)\n\n acc += batch_accuracy\n\n acc /= batches\n\n print('Test accuracy: %.1f%%' % acc)\n\ntf.app.run(main=main, argv=[sys.argv[0]])\n" ]
[ [ "tensorflow.nn.bias_add", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.matmul", "tensorflow.nn.max_pool", "tensorflow.reshape", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.app.run", "tensorflow.global_variables_initializer", "tensorflow.train.MomentumOptimizer", "tensorflow.Session", "tensorflow.argmax", "numpy.zeros", "tensorflow.nn.conv2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
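One non-obvious constant in experiment6.py above is the 2048 used for the fully connected weights and in tf.reshape(pool, [batch_size, 2048]). It follows from the architecture rather than being arbitrary: five stride-2 max-poolings reduce the 256x256 input to 8x8, and the last conv block has 32 channels. A two-line check of that arithmetic:

side, channels = 256, 32
for _ in range(5):                    # one 2x2, stride-2 max-pool per conv block
    side //= 2
print(side, side * side * channels)   # 8, 2048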
schrammlb2/policy-guided-sst
[ "8dce6619b9c771c39915c60fe9c54270ea1e621e" ]
[ "HER_mod/rl_modules/get_path_costs.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nimport os\nfrom scipy import stats\nfrom HER_mod.rl_modules.tsp import generate_path\nfrom HER_mod.rl_modules.hyperparams import NUM_GOALS, NUM_AGENTS\n\ngd_step_list = [0,2,5, 10, 20, 40]\n# NUM_AGENTS = 3\nN=200\n\ndef get_path_costs(train_pos_agent, train_vel_agent, perm_search=True):\n pos_run_time_list = []\n vel_run_time_list = []\n # gd_step_list = [0,5,10]\n num_agents = NUM_AGENTS\n num_goals=NUM_GOALS\n n=N\n # gd_step_list = [0,1]\n # num_agents = 2\n # num_goals=2\n # n=2\n pos_time_list = []\n vel_time_list = []\n for _ in range(num_agents):\n pos_agent = train_pos_agent()\n vel_agent = train_vel_agent()\n pos_agent_time_list = []\n vel_agent_time_list = []\n for i in range(n):\n # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]\n # pos = np.random.rand(2)*2-1\n\n goals = generate_path(num_goals + 1)\n pos = goals[0]\n goals = goals[1:-1]\n # pos_agent_time_list = []\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.find_shortest_path(pos, goals, gd_steps=0, perm_search=perm_search)\n pos_test_time_list = [len(min_trajectory)]*len(gd_step_list)\n pos_agent_time_list.append(pos_test_time_list)\n\n\n vel_test_time_list = []\n for gd_steps in gd_step_list:\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, perm_search=perm_search)\n vel_test_time_list.append(len(min_trajectory))\n vel_agent_time_list.append(vel_test_time_list)\n \n pos_time_list.append(pos_agent_time_list)\n vel_time_list.append(vel_agent_time_list)\n \n vel_time_list = np.array(vel_time_list).squeeze()\n pos_time_list = np.array(pos_time_list).squeeze()\n\n relative_time_change = (vel_time_list-pos_time_list)/pos_time_list\n relative_time_change = np.mean(relative_time_change, axis=1)\n\n try:\n pickle.dump(vel_time_list, open(\"velocity_target.pkl\", 'wb'))\n pickle.dump(pos_time_list, open(\"no_velocity_target.pkl\", 'wb'))\n pickle.dump(relative_time_change, open(\"relative_time_change.pkl\", 'wb'))\n except:\n print(\"pickle failure\")\n import pdb\n pdb.set_trace()\n\n mean = relative_time_change.mean(axis=0)\n t_score = stats.t.ppf(.975, num_agents)\n ci = t_score*relative_time_change.std(axis=0)/(num_agents**.5)\n steps = np.array(gd_step_list)\n\n plt.plot(steps, mean)\n plt.fill_between(steps, mean+ci, mean-ci, alpha=.4)\n plt.xlabel(\"Gradient steps\")\n plt.ylabel(\"Relative Improvement vs standard HER\")\n plt.title(\"Relative Improvement\")\n plt.savefig(os.path.join('results', \"Relative Improvement\" + '.png'))\n plt.close()\n # import pdb\n # pdb.set_trace()\n\n\n\n# def method_comparison(train_pos_agent, train_vel_agent):\n# # method_list = ['random search', \"gradient descent\", \"gradient descent (40 steps)\", \"random\", \"0 velocity target\"]\n# method_list = ['random search', \"gradient descent\", \"random\", \"0 velocity target\"]\n\n# method_runtime_dict = {'greedy': []}\n# for method in method_list:\n# method_runtime_dict[method] = [] \n\n\n# num_agents = NUM_AGENTS\n# num_goals=NUM_GOALS\n# n=N\n\n# pos_time_list = []\n# vel_time_list = []\n# for _ in range(num_agents):\n# pos_agent = train_pos_agent()\n# vel_agent = train_vel_agent()\n\n# for method in method_runtime_dict.keys():\n# method_runtime_dict[method].append([])\n\n# for i in range(n):\n# # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]\n# # pos = np.random.rand(2)*2-1\n# goals = generate_path(num_goals + 1)\n# pos = 
goals[0]\n# goals = goals[1:-1]\n# # pos_agent_time_list = []\n# min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.select_path(pos, goals, method=\"0 velocity target\")\n# # pos_test_time_list = [len(min_trajectory)]*len(gd_step_list)\n# method_runtime_dict['greedy'][-1].append(len(min_trajectory))\n\n\n# # vel_test_time_list = []\n# for method in method_list:\n# min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.select_path(pos, goals, method=method)\n# method_runtime_dict[method][-1].append(len(min_trajectory))\n# # vel_agent_time_list.append(vel_test_time_list)\n\n\n# greedy = method_runtime_dict['greedy']\n# method_runtime_dict = {method: np.array(method_runtime_dict[method]) for method in method_runtime_dict.keys()}\n# performance_dict = {method: (method_runtime_dict[method].mean(), 2*(method_runtime_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_runtime_dict.keys()}\n# relative_time_dict = {method: (method_runtime_dict[method] - greedy)/greedy for method in method_list}\n# improvement_dict = {method: (relative_time_dict[method].mean(), 2*(relative_time_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_list}\n\n\n# performance_list = [performance_dict[m][0] for m in method_runtime_dict.keys()]\n# performance_ci_list = [performance_dict[m][1] for m in method_runtime_dict.keys()]\n# relative_time_list = [improvement_dict[m][0] for m in method_list]\n# relative_time_ci_list = [improvement_dict[m][1] for m in method_list]\n\n\n\n\n# plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))\n# plt.xlabel(\"Method\")\n# plt.ylabel('Time to complete')\n# plt.title('Comparison of velocity target-setting methods')\n# plt.bar(range(len(performance_list)), performance_list, yerr=performance_ci_list) \n# plt.savefig(os.path.join('results', \"Method comparison -- Performance\" + '.png'))\n# plt.close()\n\n\n# plt.xticks(range(len(method_list)), method_list)\n# plt.xlabel(\"Method\")\n# plt.ylabel('Cost reduction over greedy baseline')\n# plt.title('Comparison of velocity target-setting methods')\n# plt.bar(range(len(relative_time_list)), relative_time_list, yerr=relative_time_ci_list) \n# plt.savefig(os.path.join('results', \"Method comparison -- Relative Improvement\" + '.png'))\n# plt.close()\n\n\n\ndef method_comparison(train_pos_agent, train_vel_agent):\n method_list = ['random search', \"gradient descent\", \"gradient descent (40 steps)\", \"random\", \"0 velocity target\"]\n # method_list = ['random search', \"gradient descent\", \"random\", \"0 velocity target\"]\n\n method_runtime_dict = {'greedy': []}\n for method in method_list:\n method_runtime_dict[method] = [] \n\n\n num_agents = NUM_AGENTS\n num_goals=NUM_GOALS\n n=N\n\n pos_time_list = []\n vel_time_list = []\n\n failed_counter_dict = {'greedy': 0}\n for method in method_list:\n failed_counter_dict[method] = 0\n\n\n for _ in range(num_agents):\n pos_agent = train_pos_agent()\n vel_agent = train_vel_agent()\n\n for method in method_runtime_dict.keys():\n method_runtime_dict[method].append([])\n\n for i in range(n):\n # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]\n # pos = np.random.rand(2)*2-1\n goals = generate_path(num_goals + 1)\n pos = goals[0]\n goals = goals[1:-1]\n # pos_agent_time_list = []\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.select_path(pos, goals, method=\"0 velocity target\")\n # pos_test_time_list = 
[len(min_trajectory)]*len(gd_step_list)\n if successful: \n method_runtime_dict['greedy'][-1].append(len(min_trajectory))\n else: \n method_runtime_dict['greedy'][-1].append(\"NULL\")\n failed_counter_dict['greedy'] += 1\n\n\n # vel_test_time_list = []\n for method in method_list:\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.select_path(pos, goals, method=method)\n if successful: \n method_runtime_dict[method][-1].append(len(min_trajectory))\n else: \n method_runtime_dict[method][-1].append(\"NULL\")\n failed_counter_dict[method] += 1\n # vel_agent_time_list.append(vel_test_time_list)\n\n success_rates = {method: 1-failed_counter_dict[method]/(num_agents*n) for method in failed_counter_dict.keys()}\n\n greedy = method_runtime_dict['greedy']\n agent_performance_dict = {}\n mean_performance_dict = {}\n ci_performance_dict = {}\n\n improvement_dict = {}\n mean_improvement_dict = {}\n ci_improvement_dict = {}\n t_score = stats.t.ppf(.975, num_agents)\n\n\n for method in method_runtime_dict.keys(): \n agent_performance_dict[method] = [[time for time in agent_list if time != \"NULL\"] for agent_list in method_runtime_dict[method]]\n agent_performance_dict[method] = [sum(agent_list)/len(agent_list) for agent_list in agent_performance_dict[method]]\n mean = sum(agent_performance_dict[method])/len(agent_performance_dict[method])\n mean_performance_dict[method] = mean\n ci_performance_dict[method] = t_score*sum([(v-mean)**2 for v in agent_performance_dict[method]])**.5/len(agent_performance_dict[method])\n\n improvement_list = []\n mean_list = []\n for agent_ind in range(num_agents):\n agent_list = method_runtime_dict[method][agent_ind]\n greedy_list = greedy[agent_ind]\n improvement_list.append([(agent_list[i] - greedy_list[i])/greedy_list[i] for i in range(n) if (agent_list[i] != \"NULL\" and greedy_list[i]!= \"NULL\")])\n mean_list.append(sum(improvement_list[agent_ind])/len(improvement_list[agent_ind]))\n\n mean = sum(mean_list)/len(mean_list)\n mean_improvement_dict[method] = mean\n ci_improvement_dict[method] = t_score*sum([(v-mean)**2 for v in mean_list])**.5/len(mean_list)\n\n # agent_improvement_dict[method] = [[(time - greedy_time)/greedy_time for time in agent_list if time != \"NULL\"] for agent_list in method_runtime_dict[method]]\n # agent_performance_dict[method] = [sum(agent_list)/len(agent_list) for agent_list in agent_performance_dict[method]]\n # mean_performance_dict[method] = sum(agent_performance_dict[method])/len(agent_performance_dict[method])\n # ci_performance_dict[method] = 2*sum([(v-mean)**2 for v in agent_performance_dict[method]])**.5/len(agent_performance_dict[method])\n # method_runtime_dict = {method: np.array(method_runtime_dict[method]) for method in method_runtime_dict.keys()}\n\n # mean_performance_dict = {method: method_runtime_dict[method] for method in method_runtime_dict.keys()}\n # relative_time_dict = {method: (method_runtime_dict[method] - greedy)/greedy for method in method_list}\n # improvement_dict = {method: (relative_time_dict[method].mean(), 2*(relative_time_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_list}\n\n\n # greedy = method_runtime_dict['greedy']\n # method_runtime_dict = {method: np.array(method_runtime_dict[method]) for method in method_runtime_dict.keys()}\n # performance_dict = {method: (method_runtime_dict[method].mean(), 2*(method_runtime_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_runtime_dict.keys()}\n # relative_time_dict = {method: 
(method_runtime_dict[method] - greedy)/greedy for method in method_list}\n # improvement_dict = {method: (relative_time_dict[method].mean(), 2*(relative_time_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_list}\n\n\n performance_list = [mean_performance_dict[m] for m in method_runtime_dict.keys()]\n performance_ci_list = [ci_performance_dict[m] for m in method_runtime_dict.keys()]\n relative_time_list = [mean_improvement_dict[m] for m in method_list]\n relative_time_ci_list = [ci_improvement_dict[m] for m in method_list]\n\n sr_list = [success_rates[m] for m in method_runtime_dict.keys()]#method_list]\n\n\n # plt.xticks(range(len(method_list)), method_list)\n plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))\n plt.xlabel(\"Method\")\n plt.ylabel('Success rate')\n plt.title('Comparison of velocity target-setting methods')\n plt.bar(range(len(sr_list)), sr_list) \n plt.savefig(os.path.join('results', \"Method comparison -- Success Rate\" + '.png'))\n plt.close()\n\n\n plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))\n plt.xlabel(\"Method\")\n plt.ylabel('Time to complete')\n plt.title('Comparison of velocity target-setting methods')\n plt.bar(range(len(performance_list)), performance_list, yerr=performance_ci_list) \n plt.savefig(os.path.join('results', \"Method comparison -- Performance\" + '.png'))\n plt.close()\n\n\n plt.xticks(range(len(method_list)), method_list)\n plt.xlabel(\"Method\")\n plt.ylabel('Cost reduction over greedy baseline')\n plt.title('Comparison of velocity target-setting methods')\n plt.bar(range(len(relative_time_list)), relative_time_list, yerr=relative_time_ci_list) \n plt.savefig(os.path.join('results', \"Method comparison -- Relative Improvement\" + '.png'))\n plt.close()\n\n\n\n\ndef get_random_search_costs(train_vel_agent, perm_search=True):\n pos_run_time_list = []\n vel_run_time_list = []\n # gd_step_list = [0,5,10]\n num_agents = NUM_AGENTS\n num_goals=NUM_GOALS\n n=N\n # gd_step_list = [0,1]\n # num_agents = 2\n # num_goals=2\n # n=2\n rand_time_list = []\n gd_time_list = []\n for _ in range(num_agents):\n vel_agent = train_vel_agent()\n rand_search_time_list = []\n gd_search_time_list = []\n for i in range(n):\n # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]\n # pos = np.random.rand(2)*2-1\n goals = generate_path(num_goals + 1)\n pos = goals[0]\n goals = goals[1:-1]\n\n rand_test_time_list = []\n gd_test_time_list = []\n for gd_steps in gd_step_list:\n # min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_start=True, perm_search=perm_search)\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_start=False, perm_search=perm_search)\n print(\"GD: \" + str(min_time))\n gd_test_time_list.append(len(min_trajectory))\n\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_search=True, perm_search=perm_search)\n print(\"random_search: \" + str(min_time))\n rand_test_time_list.append(len(min_trajectory))\n \n rand_search_time_list.append(rand_test_time_list)\n gd_search_time_list.append(gd_test_time_list)\n \n rand_time_list.append(rand_search_time_list)\n gd_time_list.append(gd_search_time_list)\n \n rand_time_list = np.array(rand_time_list).squeeze()\n gd_time_list = 
np.array(gd_time_list).squeeze()\n # best = np.minimum(rand_time_list.min(axis=2),gd_time_list.min(axis=2))\n\n relative_time_change = (gd_time_list-rand_time_list)/rand_time_list\n relative_time_change = np.mean(relative_time_change, axis=1)\n\n # try:\n # pickle.dump(vel_time_list, open(\"velocity_target.pkl\", 'wb'))\n # pickle.dump(pos_time_list, open(\"no_velocity_target.pkl\", 'wb'))\n # pickle.dump(relative_time_change, open(\"relative_time_change.pkl\", 'wb'))\n # except:\n # print(\"pickle failure\")\n # import pdb\n # pdb.set_trace()\n\n mean = relative_time_change.mean(axis=0)\n ci = 2*relative_time_change.std(axis=0)/(num_agents**.5)\n steps = np.array(gd_step_list)\n\n plt.plot(steps, mean)\n plt.fill_between(steps, mean+ci, mean-ci, alpha=.4)\n plt.xlabel(\"Gradient steps\")\n plt.ylabel(\"Relative Improvement vs random search\")\n plt.title(\"Relative Improvement vs random search\")\n plt.savefig(os.path.join('results', \"Improvement vs random search\" + '.png'))\n plt.close()\n\n\n t_score = stats.t.ppf(.975, num_agents)\n rands = rand_time_list.mean(axis=1)\n rand_mean = rands.mean(axis=0)\n rand_ci = t_score*rands.std(axis=0)/(num_agents**.5)\n\n gds = gd_time_list.mean(axis=1)\n gd_mean = gds.mean(axis=0)\n gd_ci = t_score*gds.std(axis=0)/(num_agents**.5)\n\n plt.plot(steps, rand_mean, color='red', label='Random Search')\n plt.fill_between(steps, rand_mean+rand_ci, rand_mean-rand_ci, alpha=.4, color='red')\n plt.plot(steps, gd_mean, color='blue', label='Gradient Descent')\n plt.fill_between(steps, gd_mean+gd_ci, gd_mean-gd_ci, alpha=.4, color='blue')\n plt.legend()\n plt.xlabel(\"Gradient steps\")\n plt.ylabel(\"Relative Improvement vs random search\")\n plt.title(\"Relative Improvement vs random search\")\n plt.savefig(os.path.join('results', \"Gradient Descent vs random search\" + '.png'))\n plt.close()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "scipy.stats.t.ppf", "matplotlib.pyplot.plot", "numpy.mean", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
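The plotting helpers in get_path_costs.py above all build their shaded error bands the same way: a Student-t interval over per-agent means, drawn with plt.fill_between. Here is a stand-alone version of just that interval, using hypothetical per-agent values and the same degrees-of-freedom convention as the file (t.ppf(.975, num_agents)).

import numpy as np
from scipy import stats

per_agent_means = np.array([0.12, 0.08, 0.15, 0.10, 0.09])   # hypothetical per-agent results
n = len(per_agent_means)
t_score = stats.t.ppf(0.975, n)
mean = per_agent_means.mean()
ci = t_score * per_agent_means.std() / np.sqrt(n)
print(f"{mean:.3f} +/- {ci:.3f}")    # the band plotted as fill_between(steps, mean + ci, mean - ci)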
StanczakDominik/arviz
[ "ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287", "ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287", "ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287" ]
[ "arviz/plots/backends/matplotlib/distplot.py", "arviz/plots/backends/bokeh/elpdplot.py", "examples/matplotlib/mpl_styles.py" ]
[ "\"\"\"Matplotlib distplot.\"\"\"\nimport warnings\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom . import backend_show\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import matplotlib_kwarg_dealiaser\nfrom ....numeric_utils import get_bins\n\n\ndef plot_dist(\n values,\n values2,\n color,\n kind,\n cumulative,\n label,\n rotated,\n rug,\n bw,\n quantiles,\n contour,\n fill_last,\n textsize,\n plot_kwargs,\n fill_kwargs,\n rug_kwargs,\n contour_kwargs,\n contourf_kwargs,\n pcolormesh_kwargs,\n hist_kwargs,\n ax,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib distplot.\"\"\"\n if backend_kwargs is not None:\n warnings.warn(\n (\n \"Argument backend_kwargs has not effect in matplotlib.plot_dist\"\n \"Supplied value won't be used\"\n )\n )\n backend_kwargs = None\n if ax is None:\n ax = plt.gca()\n\n if kind == \"hist\":\n ax = _histplot_mpl_op(\n values=values, values2=values2, rotated=rotated, ax=ax, hist_kwargs=hist_kwargs\n )\n\n elif kind == \"kde\":\n plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, \"plot\")\n plot_kwargs.setdefault(\"color\", color)\n legend = label is not None\n\n ax = plot_kde(\n values,\n values2,\n cumulative=cumulative,\n rug=rug,\n label=label,\n bw=bw,\n quantiles=quantiles,\n rotated=rotated,\n contour=contour,\n legend=legend,\n fill_last=fill_last,\n textsize=textsize,\n plot_kwargs=plot_kwargs,\n fill_kwargs=fill_kwargs,\n rug_kwargs=rug_kwargs,\n contour_kwargs=contour_kwargs,\n contourf_kwargs=contourf_kwargs,\n pcolormesh_kwargs=pcolormesh_kwargs,\n ax=ax,\n backend=\"matplotlib\",\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend_show(show):\n plt.show()\n\n return ax\n\n\ndef _histplot_mpl_op(values, values2, rotated, ax, hist_kwargs):\n \"\"\"Add a histogram for the data to the axes.\"\"\"\n if values2 is not None:\n raise NotImplementedError(\"Insert hexbin plot here\")\n\n bins = hist_kwargs.pop(\"bins\")\n if bins is None:\n bins = get_bins(values)\n ax.hist(np.asarray(values).flatten(), bins=bins, **hist_kwargs)\n\n if rotated:\n ax.set_yticks(bins[:-1])\n else:\n ax.set_xticks(bins[:-1])\n if hist_kwargs.get(\"label\") is not None:\n ax.legend()\n return ax\n", "\"\"\"Bokeh ELPDPlot.\"\"\"\nimport warnings\n\nimport bokeh.plotting as bkp\nfrom bokeh.models.annotations import Title\nfrom bokeh.models import ColumnDataSource\nimport bokeh.models.markers as mk\nimport numpy as np\n\nfrom . import backend_kwarg_defaults\nfrom .. 
import show_layout\nfrom ...plot_utils import _scale_fig_size\nfrom ....rcparams import rcParams, _validate_bokeh_marker\n\n\ndef plot_elpd(\n ax,\n models,\n pointwise_data,\n numvars,\n figsize,\n textsize,\n plot_kwargs,\n markersize,\n xlabels,\n coord_labels,\n xdata,\n threshold,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh elpd plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults((\"dpi\", \"plot.bokeh.figure.dpi\"),),\n **backend_kwargs,\n }\n dpi = backend_kwargs.pop(\"dpi\")\n if numvars == 2:\n (figsize, _, _, _, _, markersize) = _scale_fig_size(\n figsize, textsize, numvars - 1, numvars - 1\n )\n plot_kwargs.setdefault(\"s\", markersize)\n\n if ax is None:\n backend_kwargs.setdefault(\"width\", int(figsize[0] * dpi))\n backend_kwargs.setdefault(\"height\", int(figsize[1] * dpi))\n ax = bkp.figure(**backend_kwargs)\n ydata = pointwise_data[0] - pointwise_data[1]\n _plot_atomic_elpd(\n ax, xdata, ydata, *models, threshold, coord_labels, xlabels, True, True, plot_kwargs\n )\n\n show_layout(ax, show)\n\n else:\n max_plots = (\n numvars ** 2 if rcParams[\"plot.max_subplots\"] is None else rcParams[\"plot.max_subplots\"]\n )\n vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)\n if vars_to_plot < numvars:\n warnings.warn(\n \"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number \"\n \"of resulting ELPD pairwise plots with these variables, generating only a \"\n \"{side}x{side} grid\".format(max_plots=max_plots, side=vars_to_plot),\n UserWarning,\n )\n numvars = vars_to_plot\n\n (figsize, _, _, _, _, markersize) = _scale_fig_size(\n figsize, textsize, numvars - 2, numvars - 2\n )\n plot_kwargs.setdefault(\"s\", markersize)\n\n if ax is None:\n ax = []\n for row in range(numvars - 1):\n ax_row = []\n for col in range(numvars - 1):\n if row == 0 and col == 0:\n ax_first = bkp.figure(\n width=int(figsize[0] / (numvars - 1) * dpi),\n height=int(figsize[1] / (numvars - 1) * dpi),\n **backend_kwargs\n )\n ax_row.append(ax_first)\n elif row < col:\n ax_row.append(None)\n else:\n ax_row.append(\n bkp.figure(\n width=int(figsize[0] / (numvars - 1) * dpi),\n height=int(figsize[1] / (numvars - 1) * dpi),\n x_range=ax_first.x_range,\n y_range=ax_first.y_range,\n **backend_kwargs\n )\n )\n ax.append(ax_row)\n ax = np.array(ax)\n\n for i in range(0, numvars - 1):\n var1 = pointwise_data[i]\n\n for j in range(0, numvars - 1):\n if j < i:\n continue\n\n var2 = pointwise_data[j + 1]\n ydata = var1 - var2\n _plot_atomic_elpd(\n ax[j, i],\n xdata,\n ydata,\n models[i],\n models[j + 1],\n threshold,\n coord_labels,\n xlabels,\n j == numvars - 2,\n i == 0,\n plot_kwargs,\n )\n\n show_layout(ax, show)\n\n return ax\n\n\ndef _plot_atomic_elpd(\n ax_,\n xdata,\n ydata,\n model1,\n model2,\n threshold,\n coord_labels,\n xlabels,\n xlabels_shown,\n ylabels_shown,\n plot_kwargs,\n):\n marker = _validate_bokeh_marker(plot_kwargs.get(\"marker\"))\n marker_func = getattr(mk, marker)\n sizes = np.ones(len(xdata)) * plot_kwargs.get(\"s\")\n glyph = marker_func(\n x=\"xdata\", y=\"ydata\", size=\"sizes\", line_color=plot_kwargs.get(\"color\", \"black\")\n )\n source = ColumnDataSource(dict(xdata=xdata, ydata=ydata, sizes=sizes))\n ax_.add_glyph(source, glyph)\n if threshold is not None:\n diff_abs = np.abs(ydata - ydata.mean())\n bool_ary = diff_abs > threshold * ydata.std()\n if coord_labels is None:\n coord_labels = xdata.astype(str)\n outliers = np.argwhere(bool_ary).squeeze()\n for outlier in outliers:\n label = 
coord_labels[outlier]\n ax_.text(\n x=np.asarray(outlier), y=np.asarray(ydata[outlier]), text=label, text_color=\"black\",\n )\n if ylabels_shown:\n ax_.yaxis.axis_label = \"ELPD difference\"\n else:\n ax_.yaxis.minor_tick_line_color = None\n ax_.yaxis.major_label_text_font_size = \"0pt\"\n\n if xlabels_shown:\n if xlabels:\n ax_.xaxis.ticker = np.arange(0, len(coord_labels))\n ax_.xaxis.major_label_overrides = {\n str(key): str(value)\n for key, value in zip(np.arange(0, len(coord_labels)), list(coord_labels))\n }\n else:\n ax_.xaxis.minor_tick_line_color = None\n ax_.xaxis.major_label_text_font_size = \"0pt\"\n title = Title()\n title.text = \"{} - {}\".format(model1, model2)\n ax_.title = title\n", "\"\"\"\nMatplotlib styles\n=================\n\n_thumb: .8, .8\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\nimport arviz as az\n\nx = np.linspace(0, 1, 100)\ndist = stats.beta(2, 5).pdf(x)\n\nstyle_list = [\n \"default\",\n [\"default\", \"arviz-colors\"],\n \"arviz-darkgrid\",\n \"arviz-whitegrid\",\n \"arviz-white\",\n \"arviz-grayscale\",\n]\n\nfig = plt.figure(figsize=(12, 12))\nfor idx, style in enumerate(style_list):\n with az.style.context(style):\n ax = fig.add_subplot(3, 2, idx + 1, label=idx)\n for i in range(10):\n ax.plot(x, dist - i, f\"C{i}\", label=f\"C{i}\")\n ax.set_title(style)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"f(x)\", rotation=0, labelpad=15)\n ax.legend(bbox_to_anchor=(1, 1))\nplt.tight_layout()\n\nplt.show()\n" ]
[ [ "numpy.asarray", "matplotlib.pyplot.gca", "matplotlib.pyplot.show" ], [ "numpy.asarray", "numpy.arange", "numpy.array", "numpy.argwhere" ], [ "matplotlib.pyplot.tight_layout", "numpy.linspace", "scipy.stats.beta", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhaocy14/SmartWalker
[ "b025a7b4a2b305838a22fe4e6116ddb951c4d7bf" ]
[ "Sensors/softskin.py" ]
[ "import serial\nimport serial.tools.list_ports\nimport numpy as np\nimport math\nimport threading\nimport re\nimport os\nimport sys\nimport time\nimport matplotlib.pyplot as plt\n\npwd = os.path.abspath(os.path.abspath(__file__))\nfather_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + \"..\")\nsys.path.append(father_path)\ndata_path = os.path.abspath(\n os.path.dirname(os.path.abspath(__file__)) + os.path.sep + \"..\" +\n os.path.sep + \"data\")\n\ndef print_serial(port):\n print(\"---------------[ %s ]---------------\" % port.name)\n print(\"Path: %s\" % port.device)\n print(\"Descript: %s\" % port.description)\n print(\"HWID: %s\" % port.hwid)\n if not None == port.manufacturer:\n print(\"Manufacture: %s\" % port.manufacturer)\n if not None == port.product:\n print(\"Product: %s\" % port.product)\n if not None == port.interface:\n print(\"Interface: %s\" % port.interface)\n print()\n\n\ndef detect_serials(location=\"1-1.1:1.0\", vid=0x10c4, pid=0xea60):\n ports = serial.tools.list_ports.comports()\n for port in ports:\n print_serial(port)\n\n if port.location.__contains__(location):\n port_path = port.device\n return port_path\n else:\n print(\"Cannot find the target device: %s\" % location)\n return None\n\n\nclass SoftSkin(object):\n\n def __init__(self, is_STM32: bool = True):\n\n port_name = detect_serials(\"1-1.3:1.0\") # Arduino Mega 2560 ttyACM0\n baud_rate = 115200\n print(port_name, baud_rate)\n self.serial = serial.Serial(port_name, baud_rate, timeout=None)\n self.pwd = os.path.abspath(os.path.abspath(__file__))\n self.father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + \"..\")\n self.serial = serial.Serial(port_name, baud_rate, timeout=None)\n self.raw_data = [] # 保存一帧数据\n self.base_data = [] # 建立一组基准值用于初始化\n self.temp_data = []\n self.port_num = 32\n self.average_length = 10\n self.average_buffer = np.zeros((self.average_length, self.port_num))\n\n # detect abnormal signal\n self.max_pressure = 0\n self.safe_change_rate = 10\n self.emergency_change_rate = 50\n self.detect_length = 10\n self.detect_buffer = np.zeros((self.detect_length, self.port_num))\n self.skin_unlock_event = threading.Event()\n self.skin_unlock_event.clear()\n\n self.build_base_line_data()\n pass\n\n def read_data(self, is_shown=1):\n try:\n one_line_data = self.serial.readline().decode(\"utf-8\")\n # print(one_line_data)\n one_line_data = one_line_data.strip('SS')\n one_line_data = one_line_data.strip('\\n')\n one_line_data = one_line_data.strip('\\r')\n one_line_data = one_line_data.split('|')\n # print(one_line_data)\n if is_shown == 1:\n print(one_line_data)\n if len(one_line_data) == self.port_num:\n one_line_data = list(map(float, one_line_data))\n one_line_data = list(map(int, one_line_data))\n self.raw_data = one_line_data\n # print(self.raw_data, type(self.raw_data), type(self.raw_data[0]))\n except BaseException as be:\n print(\"Data Error:\", be)\n\n def build_base_line_data(self, initial_size=10):\n \"\"\"\n expired, no use\n 1.建立一组基准数值\n 检测异常值\n 取平均值\n :return:\n not in use because the original signals are stable enough\n \"\"\"\n base_list = []\n for i in range(initial_size):\n self.read_data(0)\n if len(self.raw_data) == self.port_num:\n temp_raw_data = self.raw_data\n base_list += temp_raw_data\n mean_base_list = np.array(base_list).reshape([-1, self.port_num])\n add_col = np.ones(mean_base_list.shape[0]).reshape([1, -1])\n mean_base_list = add_col.dot(mean_base_list) / mean_base_list.shape[0]\n self.base_data = mean_base_list.tolist()[0]\n self.base_data = 
list(map(lambda x: int(x) - 1, self.base_data))\n print(\"base line data: \", self.base_data)\n pass\n\n def read_and_record(self, record=False, show=False, plot=False, plot_num=30):\n file_path = data_path + os.path.sep + \"Softskin.txt\"\n plot_array = np.zeros((plot_num, self.port_num))\n if record:\n file = open(file_path, 'w')\n while True:\n try:\n # self.serial.flushInput()\n self.read_data(0)\n if len(self.raw_data) == len(self.base_data):\n temp_data = np.array(self.raw_data) - np.array(self.base_data)\n if show:\n print(temp_data)\n print(self.max_pressure)\n if record:\n time_index = time.time()\n write_data = temp_data.tolist()\n write_data.insert(0, time_index)\n file.write(str(write_data) + '\\n')\n file.flush()\n self.temp_data = temp_data\n self.max_pressure = self.temp_data.max()\n self.detect_buffer[0:-1, :] = self.detect_buffer[1:self.detect_length, :]\n self.detect_buffer[-1, :] = np.array(self.temp_data)\n\n if plot:\n # plt.ion()\n plot_array[0:plot_num - 1, :] = plot_array[1:plot_num, :]\n plot_array[plot_num - 1, :] = np.array(temp_data)\n plt.clf()\n plt.xlabel('Time')\n plt.ylabel('pressure')\n plt.ylim((-10, 270))\n plt.plot(range(0, plot_num), plot_array)\n # plt.ioff()\n # plt.show()\n # plt.draw()\n plt.pause(0.0000000001)\n except BaseException as be:\n print(\"Data Error:\", be)\n\n def update_from_STM32(self, STM32_data: np.ndarray):\n try:\n self.raw_data = STM32_data\n except:\n pass\n\n def unlock(self):\n while True:\n change_rate = self.detect_buffer[-1, :] - self.detect_buffer[0, :]\n change_rate = change_rate.max()\n if self.safe_change_rate <= change_rate < self.emergency_change_rate:\n print(\"unlock!\")\n break\n time.sleep(0.1)\n\n\nif __name__ == '__main__':\n skin = SoftSkin()\n # skin.build_base_line_data()\n thread_reading = threading.Thread(target=skin.read_and_record, args=())\n\n time.sleep(1)\n thread_reading.start()\n\n skin.unlock()\n\n" ]
[ [ "matplotlib.pyplot.ylim", "numpy.ones", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.zeros", "matplotlib.pyplot.pause", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dvamossy/EmTract
[ "68a00e3d63fbc2c401b0d2b297bf96ffb75940e8" ]
[ "emtract/model_inference.py" ]
[ "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nfrom emtract.model import Model, ModelType\nimport pandas as pd\n\n\nclass ModelInference:\n\n MODEL_BASE_PATH = 'build/models/'\n DATA_BASE_PATH = './emtract/data/'\n\n def __init__(self, model_type):\n if model_type == 'twitter':\n self.model = Model(ModelType.TWITTER)\n else:\n self.model = Model(ModelType.STOCK_TWITS)\n\n def inference(self, text):\n return self.model.predict([text])\n\n def file_inference(self, file_name, output):\n df = pd.read_csv(file_name, header=None)\n predictions = self.model.predict(df.iloc[:, 0].values)\n predictions.to_csv(output, index=False)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
pleiades-s/PyTorch-tutorials-kr
[ "3d749ea2fe67363b5d46340b742308b744fa0419", "3d749ea2fe67363b5d46340b742308b744fa0419" ]
[ "docs/_downloads/d923ca53b1bfbeb3c222ae46d65d485e/transfer_learning_tutorial.py", "docs/_downloads/07d05907b3ff859aeed5f76f1acc5df4/Intro_to_TorchScript_tutorial.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n컴퓨터 비전(Vision)을 위한 전이학습(Transfer Learning)\n=======================================================\n**Author**: `Sasank Chilamkurthy <https://chsasank.github.io>`_\n **번역**: `박정환 <http://github.com/9bow>`_\n\n이 튜토리얼에서는 전이학습(Transfer Learning)을 이용하여 이미지 분류를 위한\n합성곱 신경망을 어떻게 학습시키는지 배워보겠습니다. 전이학습에 대해서는\n`CS231n 노트 <http://cs231n.github.io/transfer-learning/>`__ 에서 더 많은 내용을\n읽어보실 수 있습니다.\n\n위 노트를 인용해보면,\n\n 실제로 충분한 크기의 데이터셋을 갖추기는 상대적으로 드물기 때문에,\n (무작위 초기화를 통해) 맨 처음부터 합성곱 신경망(Convolutional\n Network) 전체를 학습하는 사람은 매우 적습니다. 대신, 매우 큰 데이터셋(예.\n 100가지 분류에 대해 120만개의 이미지가 포함된 ImageNet)에서 합성곱\n 신경망(ConvNet)을 미리 학습한 후, 이 합성곱 신경망을 관심있는 작업\n 을 위한 초기 설정 또는 고정된 특징 추출기(fixed feature extractor)로 사용합니다.\n\n이러한 전이학습 시나리오의 주요한 2가지는 다음과 같습니다:\n\n- **합성곱 신경망의 미세조정(finetuning)**: 무작위 초기화 대신, 신경망을\n ImageNet 1000 데이터셋 등으로 미리 학습한 신경망으로 초기화합니다. 학습의 나머지\n 과정들은 평상시와 같습니다.\n- **고정된 특징 추출기로써의 합성곱 신경망**: 여기서는 마지막에 완전히 연결\n 된 계층을 제외한 모든 신경망의 가중치를 고정합니다. 이 마지막의 완전히 연결된\n 계층은 새로운 무작위의 가중치를 갖는 계층으로 대체되어 이 계층만 학습합니다.\n\n\"\"\"\n# License: BSD\n# Author: Sasank Chilamkurthy\n\nfrom __future__ import print_function, division\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\n\nplt.ion() # 대화형 모드\n\n######################################################################\n# 데이터 불러오기\n# ---------------\n#\n# 데이터를 불러오기 위해 torchvision과 torch.utils.data 패키지를 사용하겠습니다.\n#\n# 여기서 풀고자 하는 문제는 **개미** 와 **벌** 을 분류하는 모델을 학습하는 것입니다.\n# 개미와 벌 각각의 학습용 이미지는 대략 120장 정도 있고, 75개의 검증용 이미지가\n# 있습니다. 일반적으로 맨 처음부터 학습을 한다면 이는 일반화하기에는 아주 작은\n# 데이터셋입니다. 하지만 우리는 전이학습을 할 것이므로, 일반화를 제법 잘 할 수 있을\n# 것입니다.\n#\n# 이 데이터셋은 ImageNet의 아주 작은 일부입니다.\n#\n# .. 
Note ::\n# 데이터를 `여기 <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_\n# 에서 다운로드 받아 현재 디렉토리에 압축을 푸십시오.\n\n# 학습을 위해 데이터 증가(augmentation) 및 일반화(normalization)\n# 검증을 위한 일반화\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n}\n\ndata_dir = 'data/hymenoptera_data'\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in ['train', 'val']}\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\n shuffle=True, num_workers=4)\n for x in ['train', 'val']}\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\nclass_names = image_datasets['train'].classes\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n######################################################################\n# 일부 이미지 시각화하기\n# ^^^^^^^^^^^^^^^^^^^^^^^^^\n# 데이터 증가를 이해하기 위해 일부 학습용 이미지를 시각화해보겠습니다.\n\ndef imshow(inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.pause(0.001) # 갱신이 될 때까지 잠시 기다립니다.\n\n\n# 학습 데이터의 배치를 얻습니다.\ninputs, classes = next(iter(dataloaders['train']))\n\n# 배치로부터 격자 형태의 이미지를 만듭니다.\nout = torchvision.utils.make_grid(inputs)\n\nimshow(out, title=[class_names[x] for x in classes])\n\n\n######################################################################\n# 모델 학습하기\n# --------------\n#\n# 이제 모델을 학습하기 위한 일반 함수를 작성해보겠습니다. 
여기서는 다음 내용들을\n# 설명합니다:\n#\n# - 학습율(learning rate) 관리(scheduling)\n# - 최적의 모델 구하기\n#\n# 아래에서 ``scheduler`` 매개변수는 ``torch.optim.lr_scheduler`` 의 LR 스케쥴러\n# 객체(Object)입니다.\n\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n # 각 에폭(epoch)은 학습 단계와 검증 단계를 갖습니다.\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # 모델을 학습 모드로 설정\n else:\n model.eval() # 모델을 평가 모드로 설정\n\n running_loss = 0.0\n running_corrects = 0\n\n # 데이터를 반복\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # 매개변수 경사도를 0으로 설정\n optimizer.zero_grad()\n\n # 순전파\n # 학습 시에만 연산 기록을 추적\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # 학습 단계인 경우 역전파 + 최적화\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # 통계\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n if phase == 'train':\n scheduler.step()\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # 모델을 깊은 복사(deep copy)함\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # 가장 나은 모델 가중치를 불러옴\n model.load_state_dict(best_model_wts)\n return model\n\n\n######################################################################\n# 모델 예측값 시각화하기\n# ^^^^^^^^^^^^^^^^^^^^^^^\n#\n# 일부 이미지에 대한 예측값을 보여주는 일반화된 함수입니다.\n#\n\ndef visualize_model(model, num_images=6):\n was_training = model.training\n model.eval()\n images_so_far = 0\n fig = plt.figure()\n\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(dataloaders['val']):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n for j in range(inputs.size()[0]):\n images_so_far += 1\n ax = plt.subplot(num_images//2, 2, images_so_far)\n ax.axis('off')\n ax.set_title('predicted: {}'.format(class_names[preds[j]]))\n imshow(inputs.cpu().data[j])\n\n if images_so_far == num_images:\n model.train(mode=was_training)\n return\n model.train(mode=was_training)\n\n######################################################################\n# 합성곱 신경망 미세조정(finetuning)\n# ----------------------------------\n#\n# 미리 학습한 모델을 불러온 후 마지막의 완전히 연결된 계층을 초기화합니다.\n#\n\nmodel_ft = models.resnet18(pretrained=True)\nnum_ftrs = model_ft.fc.in_features\n# 여기서 각 출력 샘플의 크기는 2로 설정합니다.\n# 또는, nn.Linear(num_ftrs, len (class_names))로 일반화할 수 있습니다.\nmodel_ft.fc = nn.Linear(num_ftrs, 2)\n\nmodel_ft = model_ft.to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\n# 모든 매개변수들이 최적화되었는지 관찰\noptimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\n\n# 7 에폭마다 0.1씩 학습율 감소\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)\n\n######################################################################\n# 학습 및 평가하기\n# ^^^^^^^^^^^^^^^^^^\n#\n# CPU에서는 15-25분 가량, GPU에서는 1분도 이내의 시간이 걸립니다.\n#\n\nmodel_ft = 
train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,\n num_epochs=25)\n\n######################################################################\n#\n\nvisualize_model(model_ft)\n\n\n######################################################################\n# 고정된 특징 추출기로써의 합성곱 신경망\n# ---------------------------------------\n#\n# 이제, 마지막 계층을 제외한 신경망의 모든 부분을 고정해야 합니다.\n# ``requires_grad == False`` 로 설정하여 매개변수를 고정하여 ``backward()`` 중에\n# 경사도가 계산되지 않도록 해야합니다.\n#\n# 이에 대한 문서는\n# `여기 <http://pytorch.org/docs/notes/autograd.html#excluding-subgraphs-from-backward>`__\n# 에서 확인할 수 있습니다.\n#\n\nmodel_conv = torchvision.models.resnet18(pretrained=True)\nfor param in model_conv.parameters():\n param.requires_grad = False\n\n# 새로 생성된 모듈의 매개변수는 기본값이 requires_grad=True 임\nnum_ftrs = model_conv.fc.in_features\nmodel_conv.fc = nn.Linear(num_ftrs, 2)\n\nmodel_conv = model_conv.to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\n# 이전과는 다르게 마지막 계층의 매개변수들만 최적화되는지 관찰\noptimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)\n\n# 7 에폭마다 0.1씩 학습율 감소\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n\n\n######################################################################\n# 학습 및 평가하기\n# ^^^^^^^^^^^^^^^^^\n#\n# CPU에서 실행하는 경우 이전과 비교했을 때 약 절반 가량의 시간만이 소요될 것입니다.\n# 이는 대부분의 신경망에서 경사도를 계산할 필요가 없기 때문입니다. 하지만,\n# 순전파는 계산이 필요합니다.\n#\n\nmodel_conv = train_model(model_conv, criterion, optimizer_conv,\n exp_lr_scheduler, num_epochs=25)\n\n######################################################################\n#\n\nvisualize_model(model_conv)\n\nplt.ioff()\nplt.show()\n\n######################################################################\n# 더 배워볼 내용\n# -----------------\n#\n# 전이학습의 응용 사례(application)들을 더 알아보려면,\n# :doc:`/intermediate/quantized_transfer_learning_tutorial` 을 참조해보세요.\n#\n#\n", "\"\"\"\nTorchScript 소개\n===========================\n\n**Author**: James Reed ([email protected]), Michael Suo ([email protected]), rev2\n\n**번역**: `강준혁 <https://github.com/k1101jh>`_\n\n이 튜토리얼은 C++와 같은 고성능 환경에서 실행될 수 있는\nPyTorch 모델(``nn.Module`` 의 하위클래스)의 중간 표현인\nTorchScript에 대한 소개입니다.\n\n이 튜토리얼에서는 다음을 다룰 것입니다:\n\n1. 다음을 포함한 PyTorch의 모델 제작의 기본:\n\n- 모듈(Modules)\n- ``forward`` 함수 정의하기\n- 모듈을 계층 구조로 구성하기\n\n2. PyTorch 모듈을 고성능 배포 런타임인 TorchScript로 변환하는 특정 방법\n\n- 기존 모듈 추적하기\n- 스크립트를 사용하여 모듈을 직접 컴파일하기\n- 두 가지 접근 방법을 구성하는 방법\n- TorchScript 모듈 저장 및 불러오기\n\n이 튜토리얼을 완료한 후에는\n`다음 학습서 <https://pytorch.org/tutorials/advanced/cpp_export.html>`_\n를 통해 C++에서 TorchScript 모델을 실제로 호출하는 예제를 안내합니다.\n\n\"\"\"\n\nimport torch # This is all you need to use both PyTorch and TorchScript!\nprint(torch.__version__)\n\n\n######################################################################\n# PyTorch 모델 작성의 기초\n# ---------------------------------\n#\n# 간단한 ``Module`` 을 정의하는 것부터 시작하겠습니다. ``Module`` 은 PyTorch의\n# 기본 구성 단위입니다. 이것은 다음을 포함합니다:\n#\n# 1. 호출을 위해 모듈을 준비하는 생성자\n# 2. ``Parameters`` 집합과 하위 ``Module`` . 이것들은 생성자에 의해 초기화되며\n# 호출 중에 모듈에 의해 사용될 수 있습니다.\n# 3. ``forward`` 함수. 모듈이 호출될 때 실행되는 코드입니다.\n#\n# 작은 예제로 시작해 보겟습니다:\n#\n\nclass MyCell(torch.nn.Module):\n def __init__(self):\n super(MyCell, self).__init__()\n\n def forward(self, x, h):\n new_h = torch.tanh(x + h)\n return new_h, new_h\n\nmy_cell = MyCell()\nx = torch.rand(3, 4)\nh = torch.rand(3, 4)\nprint(my_cell(x, h))\n\n\n######################################################################\n# 우리는 다음 작업을 수행했습니다.:\n#\n# 1. 하위 클래스로 ``torch.nn.Module`` 을 갖는 클래스를 생성했습니다.\n# 2. 생성자를 정의했습니다. 생성자는 많은 작업을 수행하지 않고 ``super`` 로\n# 생성자를 호출합니다.\n# 3. 
두 개의 입력을 받아 두 개의 출력을 반환하는 ``forward`` 함수를 정의했습니다.\n# ``forward`` 함수의 실제 내용은 크게 중요하진 않지만, 가짜 `RNN\n# cell <https://colah.github.io/posts/2015-08-Understanding-LSTMs/>`__ 의\n# 일종입니다. 즉, 반복(loop)에 적용되는 함수입니다.\n#\n# 모듈을 인스턴스화하고, 3x4 크기의 무작위 값들로 이루어진 행렬 ``x`` 와 ``y`` 를\n# 만들었습니다.\n# 그런 다음, ``my_cell(x, h)`` 를 이용해 cell을 호출했습니다. 이것은 ``forward``\n# 함수를 호출합니다.\n#\n# 좀 더 흥미로운 것을 해봅시다:\n#\n\nclass MyCell(torch.nn.Module):\n def __init__(self):\n super(MyCell, self).__init__()\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, x, h):\n new_h = torch.tanh(self.linear(x) + h)\n return new_h, new_h\n\nmy_cell = MyCell()\nprint(my_cell)\nprint(my_cell(x, h))\n\n\n######################################################################\n# 모듈 ``MyCell`` 을 재정의했지만, 이번에는 ``self.linear`` 속성을 추가하고\n# forward 함수에서 ``self.linear`` 을 호출했습니다.\n#\n# 여기서 무슨 일이 일어날까요? ``torch.nn.Linear`` 은 ``MyCell`` 과\n# 마찬가지로 PyTorch 표준 라이브러리의 ``Module`` 입니다. 이것은 호출 구문을\n# 사용하여 호출할 수 있습니다. 우리는 ``Module`` 의 계층을 구축하고 있습니다.\n#\n# ``Module`` 에서 ``print`` 하는 것은 ``Module`` 의 하위 클래스 계층에 대한\n# 시각적 표현을 제공할 것입니다. 이 예제에서는 ``Linear`` 의 하위 클래스와\n# 하위 클래스의 매개 변수를 볼 수 있습니다.\n#\n# ``Module`` 을 이런 방식으로 작성하면, 재사용 가능한 구성 요소를 사용하여\n# 모델을 간결하고 읽기 쉽게 작성할 수 있습니다.\n#\n# 여러분은 출력된 내용에서 ``grad_fn`` 을 확인하셨을 것입니다. 이것은\n# `오토그라드(autograd) <https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html>`__\n# 라 불리는 PyTorch의 자동 미분 방법의 세부 정보입니다. 요컨데, 이 시스템은\n# 잠재적으로 복잡한 프로그램을 통해 미분을 계산할 수 있게 합니다. 이 디자인은\n# 모델 제작에 엄청난 유연성을 제공합니다.\n#\n# 이제 유연성을 시험해 보겠습니다.\n#\n\nclass MyDecisionGate(torch.nn.Module):\n def forward(self, x):\n if x.sum() > 0:\n return x\n else:\n return -x\n\nclass MyCell(torch.nn.Module):\n def __init__(self):\n super(MyCell, self).__init__()\n self.dg = MyDecisionGate()\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, x, h):\n new_h = torch.tanh(self.dg(self.linear(x)) + h)\n return new_h, new_h\n\nmy_cell = MyCell()\nprint(my_cell)\nprint(my_cell(x, h))\n\n\n######################################################################\n# MyCell 클래스를 다시 정의했지만, 여기선 ``MyDecisionGate`` 를 정의했습니다.\n# 이 모듈은 **제어 흐름** 을 활용합니다. 제어 흐름은 루프와 ``if`` 명령문과\n# 같은 것으로 구성됩니다.\n#\n# 많은 프레임워크들은 주어진 프로그램 코드로부터 기호식 미분(symbolic\n# derivatives)을 계산하는 접근법을 취하고 있습니다. 하지만, PyTorch에서는 변화도\n# 테이프(gradient tape)를 사용합니다. 연산이 발생할 때 이를 기록하고, 미분값을\n# 계산할 때 거꾸로 재생합니다. 이런 방식으로, 프레임워크는 언어의 모든 구문에\n# 대한 미분값을 명시적으로 정의할 필요가 없습니다.\n#\n# .. figure:: https://github.com/pytorch/pytorch/raw/master/docs/source/_static/img/dynamic_graph.gif\n# :alt: 오토그라드가 작동하는 방식\n#\n# 오토그라드가 작동하는 방식\n#\n\n\n######################################################################\n# TorchScript의 기초\n# ---------------------\n#\n# 이제 실행 예제를 살펴보고 TorchScript를 적용하는 방법을 살펴보겠습니다.\n#\n# 한마디로, TorchScript는 PyTorch의 유연하고 동적인 특성을 고려하여 모델 정의를\n# 캡쳐할 수 있는 도구를 제공합니다.\n# **추적(tracing)** 이라 부르는 것을 검사하는 것으로 시작하겠습니다.\n#\n# ``Module`` 추적\n# ~~~~~~~~~~~~~~~~~~~\n#\n\nclass MyCell(torch.nn.Module):\n def __init__(self):\n super(MyCell, self).__init__()\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, x, h):\n new_h = torch.tanh(self.linear(x) + h)\n return new_h, new_h\n\nmy_cell = MyCell()\nx, h = torch.rand(3, 4), torch.rand(3, 4)\ntraced_cell = torch.jit.trace(my_cell, (x, h))\nprint(traced_cell)\ntraced_cell(x, h)\n\n\n######################################################################\n# 살짝 앞으로 돌아가 ``MyCell`` 의 두 번째 버전을 가져왔습니다. 이전에 이것을\n# 인스턴스화 했지만 이번엔 ``torch.jit.trace`` 를 호출하고, ``Module`` 을\n# 전달했으며, 네트워크가 볼 수 있는 *입력 예* 를 전달했습니다.\n#\n# 여기서 무슨 일이 발생했습니까? 
``Module`` 을 호출하였고, ``Module`` 이 돌아갈 때\n# 발생한 연산을 기록하였고, ``torch.jit.ScriptModule`` 의 인스터스를 생성했습니다.\n# ( ``TracedModule`` 은 인스턴스입니다)\n#\n# TorchScript는 일반적으로 딥 러닝에서 *그래프* 라고 하는 중간 표현(또는 IR)에\n# 정의를 기록합니다. ``.graph`` 속성으로 그래프를 확인해볼 수 있습니다:\n#\n\nprint(traced_cell.graph)\n\n\n######################################################################\n# 그러나, 이것은 저수준의 표현이며 그래프에 포함된 대부분의 정보는\n# 최종 사용자에게 유용하지 않습니다. 대신, ``.code`` 속성을 사용하여 코드에\n# 대한 Python 구문 해석을 제공할 수 있습니다:\n#\n\nprint(traced_cell.code)\n\n\n######################################################################\n# **어째서** 이런 일들을 했을까요? 여기에는 몇 가지 이유가 있습니다:\n#\n# 1. TorchScript 코드는 기본적으로 제한된 Python 인터프리터인 자체 인터프리터에서\n# 호출될 수 있습니다. 이 인터프리터는 GIL(Global Interpreter Lock)을 얻지\n# 않으므로 동일한 인스턴스에서 동시에 많은 요청을 처리할 수 있습니다.\n# 2. 이 형식을 사용하면 전체 모델을 디스크에 저장하고 Python 이외의 언어로 작성된\n# 서버와 같은 다른 환경에서 불러올 수 있습니다.\n# 3. TorchScript는 보다 효율적인 실행을 제공하기 위해 코드에서 컴파일러 최적화를\n# 수행할 수 있는 표현을 제공합니다.\n# 4. TorchScript를 사용하면 개별 연산자보다 프로그램의 더 넓은 관점을 요구하는 많은\n# 백엔드/장치 런타임과 상호작용(interface)할 수 있습니다.\n#\n# ``traced_cell`` 을 호출하면 Python 모듈과 동일한 결과가 생성됩니다:\n#\n\nprint(my_cell(x, h))\nprint(traced_cell(x, h))\n\n\n######################################################################\n# 스크립팅을 사용하여 모듈 변환\n# ----------------------------------\n#\n# 제어 흐름이 포함된(control-flow-laden) 하위 모듈이 아닌 모듈 버전 2를 사용하는\n# 이유가 있습니다. 지금 살펴봅시다:\n#\n\nclass MyDecisionGate(torch.nn.Module):\n def forward(self, x):\n if x.sum() > 0:\n return x\n else:\n return -x\n\nclass MyCell(torch.nn.Module):\n def __init__(self, dg):\n super(MyCell, self).__init__()\n self.dg = dg\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, x, h):\n new_h = torch.tanh(self.dg(self.linear(x)) + h)\n return new_h, new_h\n\nmy_cell = MyCell(MyDecisionGate())\ntraced_cell = torch.jit.trace(my_cell, (x, h))\nprint(traced_cell.code)\n\n\n######################################################################\n# ``.code`` 출력을 보면, ``if-else`` 분기가 어디에도 없다는 것을 알 수 있습니다!\n# 어째서일까요? 추적은 코드를 실행하고 *발생하는* 작업을 기록하며 정확하게 수행하는\n# 스크립트 모듈(ScriptModule)을 구성하는 일을 수행합니다. 불행하게도, 제어 흐름과\n# 같은 것들은 지워집니다.\n#\n# TorchScript에서 이 모듈을 어떻게 충실하게 나타낼 수 있을까요? Python 소스 코드를\n# 직접 분석하여 TorchScript로 변환하는 **스크립트 컴파일러(script compiler)** 를\n# 제공합니다. ``MyDecisionGate`` 를 스크립트 컴파일러를 사용하여 변환해 봅시다:\n#\n\nscripted_gate = torch.jit.script(MyDecisionGate())\n\nmy_cell = MyCell(scripted_gate)\ntraced_cell = torch.jit.script(my_cell)\nprint(traced_cell.code)\n\n\n######################################################################\n# 만세! 이제 TorchScript에서 프로그램의 동작을 충실하게 캡쳐했습니다. 이제\n# 프로그램을 실행해 봅시다:\n#\n\n# 새로운 입력\nx, h = torch.rand(3, 4), torch.rand(3, 4)\ntraced_cell(x, h)\n\n\n######################################################################\n# 스크립팅과 추적 혼합\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# 어떤 상황에서는 스크립팅보다는 추적을 사용해야 합니다. (예: 모듈에는 TorchScript에\n# 표시하지 않으려는 Python 상수 값을 기반으로 만들어진 많은 구조적인\n# 결정(architectural decisions)이 있습니다.) 
이 경우, 스크립팅은 추적으로\n# 구성될 수 있습니다: ``torch.jit.script`` 는 추적된 모듈의 코드를 인라인(inline)\n# 할 것이고, 추적은 스크립트 된 모듈의 코드를 인라인 할 것입니다.\n#\n# 첫 번째 경우의 예:\n#\n\nclass MyRNNLoop(torch.nn.Module):\n def __init__(self):\n super(MyRNNLoop, self).__init__()\n self.cell = torch.jit.trace(MyCell(scripted_gate), (x, h))\n\n def forward(self, xs):\n h, y = torch.zeros(3, 4), torch.zeros(3, 4)\n for i in range(xs.size(0)):\n y, h = self.cell(xs[i], h)\n return y, h\n\nrnn_loop = torch.jit.script(MyRNNLoop())\nprint(rnn_loop.code)\n\n\n\n######################################################################\n# 두 번째 경우의 예:\n#\n\nclass WrapRNN(torch.nn.Module):\n def __init__(self):\n super(WrapRNN, self).__init__()\n self.loop = torch.jit.script(MyRNNLoop())\n\n def forward(self, xs):\n y, h = self.loop(xs)\n return torch.relu(y)\n\ntraced = torch.jit.trace(WrapRNN(), (torch.rand(10, 3, 4)))\nprint(traced.code)\n\n\n######################################################################\n# 이러한 방식으로, 스크립팅과 추적은 상황에 따라서 따로 사용되거나, 함께\n# 사용될 수 있습니다.\n#\n# 모델 저장 및 불러오기\n# -------------------------\n#\n# TorchScript 모듈을 아카이브 형식으로 디스크에 저장하고 불러오는 API를 제공합니다.\n# 이 형식은 코드, 매개 변수, 속성과 디버그 정보를 포함합니다. 이것은 그 아카이브가\n# 완전히 별개의 프로세스로 로드할 수 있는 모델의 독립 표현임을 의미합니다.\n# 랩핑 된 RNN 모듈을 저장하고 로드해 봅시다:\n#\n\ntraced.save('wrapped_rnn.zip')\n\nloaded = torch.jit.load('wrapped_rnn.zip')\n\nprint(loaded)\nprint(loaded.code)\n\n\n######################################################################\n# 보시다시피, 직렬화는 모듈 계층과 검사한 코드를 유지합니다. 또한 모델을 로드할\n# 수 있습니다. 예를 들어, Python 없이 실행하기 위해 모델을\n# `C++ <https://pytorch.org/tutorials/advanced/cpp_export.html>`__ 로 로드할\n# 수 있습니다.\n#\n# 더 읽을거리\n# ~~~~~~~~~~~~~~~\n# 튜토리얼을 완료했습니다! 관련 데모를 보려면 TorchScript를 사용하여 기계 번역\n# 모델을 변환하기 위한 NeurIPS 데모를 확인하십시오:\n# https://colab.research.google.com/drive/1HiICg6jRkBnr5hvK2-VnMi88Vi9pUzEJ\n#\n" ]
[ [ "matplotlib.pyplot.imshow", "torch.max", "torch.utils.data.DataLoader", "torch.sum", "torch.set_grad_enabled", "torch.no_grad", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss", "numpy.clip", "matplotlib.pyplot.subplot", "matplotlib.pyplot.figure", "torch.optim.lr_scheduler.StepLR", "matplotlib.pyplot.title", "torch.nn.Linear", "numpy.array", "matplotlib.pyplot.ion", "matplotlib.pyplot.show", "matplotlib.pyplot.ioff", "matplotlib.pyplot.pause" ], [ "torch.jit.script", "torch.jit.load", "torch.jit.trace", "torch.zeros", "torch.tanh", "torch.nn.Linear", "torch.relu", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JDMusc/Online-Bullying-Image-Classifcation
[ "9196c60c554cf160d68cb9e9c41fda124abebf63", "9196c60c554cf160d68cb9e9c41fda124abebf63", "9196c60c554cf160d68cb9e9c41fda124abebf63", "9196c60c554cf160d68cb9e9c41fda124abebf63" ]
[ "modelEpochs.py", "scrapDataHelpers.py", "presentUtils.py", "test_original.py" ]
[ "import copy\nimport numpy as np\nfrom numpy import log10\nimport os\nfrom toolz import pipe as p\n\nfrom tensorboardX import SummaryWriter\n\nimport torch\nimport torch.nn as nn\n\nimport numpy as np\n\nimport preprocessing as pp\n\n\ndef findParam(model, name_filter):\n if callable(name_filter):\n fn = name_filter\n else:\n name_filter = [name_filter] if type(name_filter) is str else name_filter\n fn = lambda param_name: all(\n component in param_name for component in name_filter)\n \n return [(pn, pv) for (pn, pv) in model.named_parameters() if fn(pn)]\n\n\ndef setParameterRequiresGrad(model, requires_grad = False, params = None):\n params = model.parameters() if params is None else params\n for param in params:\n param.requires_grad = requires_grad\n\n\ndef runEpochs(\n model, criterion, \n dataloaders, dataset_sizes, device, \n log_params_verbose, num_epochs,\n optimizer, scheduler, \n writer):\n\n \n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n prev_model_wts = best_model_wts\n for epoch in range(num_epochs):\n epoch_acc, model_wts = _run_epoch(\n model, \n criterion, dataloaders, dataset_sizes, device, \n epoch, log_params_verbose, num_epochs, \n optimizer, scheduler, writer)\n \n _log_coef_diffs(writer, epoch, prev_model_wts, model_wts)\n prev_model_wts = model_wts\n\n if epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = model_wts\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return (model, best_acc)\n\n\ndef viewParamsToBeUpdated(model):\n return [n for (n,p) in model.named_parameters() if p.requires_grad == True]\n\n\ndef add_graph_model(writer, model, dataloaders, device):\n inputs, classes = p(dataloaders['train'], iter, next)\n \n inputs = inputs.to(device)\n classes = classes.to(device)\n \n writer.add_graph(model, inputs)\n\n\ndef _run_epoch(model, \n criterion, dataloaders, dataset_sizes, device, \n epoch, log_params_verbose, num_epochs,\n optimizer, scheduler, writer):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n n_samples = {'train': 0, 'val': 0}\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n is_train = phase == 'train'\n\n if is_train:\n scheduler.step()\n model.train()\n else:\n model.eval()\n\n running_loss = 0.0\n running_corrects = 0\n\n for inputs, labels in dataloaders[phase]:\n n_samples[phase] = n_samples[phase] + len(labels)\n\n inputs = inputs.to(device)\n labels = labels.to(device)\n preds, loss = _take_step(\n model, criterion, optimizer, inputs, labels, is_train)\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n _log_epoch_phase_stats(writer, epoch, phase, epoch_loss, epoch_acc)\n\n if log_params_verbose:\n _log_model_params_verbose(writer, model, epoch, phase)\n\n # deep copy the model\n model_wts = copy.deepcopy(model.state_dict())\n\n _log_lr(writer, epoch, scheduler)\n print('# training samples')\n print(n_samples['train'])\n print('# val samples')\n print(n_samples['val'])\n \n return epoch_acc, model_wts\n\n\n\ndef _take_step(model, criterion, optimizer, inputs, labels, is_train):\n \n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(is_train):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # 
backward + optimize only if in training phase\n if is_train:\n loss.backward()\n optimizer.step()\n \n return preds, loss\n\n\ndef _add_scope(scope, k):\n return scope + '/' + k\n \n\ndef _add_scope_gen(scope):\n return lambda k: _add_scope(scope, k)\n\n\ndef _log_model_params_verbose(writer, model, run_num, scope, use_hist = False):\n def write(tag, param):\n fn = writer.add_histogram if use_hist else writer.add_scalar\n param = param if use_hist else param.abs().mean()\n return fn(tag, param, run_num)\n \n with torch.no_grad():\n for (name, param) in model.named_parameters():\n p(name, \n _add_scope_gen(scope),\n lambda tag: write(tag, param)\n )\n\n\ndef _log_lr(writer, epoch, scheduler):\n lr = p(scheduler.get_lr(), np.array)[0]\n p('lr', \n _add_scope_gen('lr'),\n lambda _: writer.add_scalar(_, lr, epoch)\n )\n p('log10_lr',\n _add_scope_gen('lr'),\n lambda _: writer.add_scalar(_, log10(lr), epoch)\n )\n\n\ndef _log_epoch_phase_stats(writer, epoch, scope, epoch_loss, epoch_acc): \n\n log_measure = lambda k, v: p(k,\n _add_scope_gen(scope),\n lambda _ : writer.add_scalar(_, v, epoch)\n )\n \n log_measure('loss', epoch_loss)\n log_measure('accuracy', epoch_acc)\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n scope, epoch_loss, epoch_acc))\n \n\ndef _log_coef_diffs(writer, epoch, prev_model_state, curr_model_state):\n def write(name, curr):\n diff = curr - prev_model_state[name]\n p(name,\n _add_scope_gen('params'),\n lambda _: writer.add_scalar(\n _ + '.diff', diff.abs().mean(), epoch)\n )\n\n with torch.no_grad():\n for name in curr_model_state:\n if ('weight' in name or 'bias' in name): \n write(name, curr_model_state[name])\n\n\n", "import os\nimport shutil\n\nimport numpy as np\n\nfrom toolz import pipe as p\n\n\ndef makeScrapData(classes, dest_dir = None, n_train = 30, n_val = None, src_dir = 'image_data'):\n if dest_dir is None:\n dest_dir = 'scrap_data' + str(n_train)\n\n fs = {c: [os.path.join(src_dir, c, f) for f in p(os.path.join(src_dir, c), os.listdir)]\n for c in classes}\n \n by_phase = 'train' in os.listdir(src_dir) and 'test' in os.listdir(src_dir)\n class_percents = classPercentages(src_dir, classes = classes, by_phase= by_phase)['percent']\n \n train_counts = {c: int(class_percents[c]/100 * n_train) for c in classes}\n \n train_fs = {c: np.random.choice(fs[c], train_counts[c], replace = False) for c in classes}\n \n val_candidates = lambda c: list(set(fs[c]) - set(train_fs[c]))\n val_fs = {c: val_candidates(c) for c in classes}\n if n_val is not None:\n val_counts = {c: int(class_percents[c]/100 * n_val) for c in classes}\n val_fs = {c: np.random.choice(val_candidates(c), val_counts[c], replace = False)\n for c in classes}\n \n if os.path.exists(dest_dir):\n shutil.rmtree(dest_dir)\n \n os.mkdir(dest_dir)\n \n joinDirGen = lambda d: lambda f: os.path.join(d, f)\n joinScrapDir = joinDirGen(dest_dir)\n \n train_val_fs = dict(train=train_fs, val=val_fs)\n for tv in ('train', 'val'):\n p(tv, joinScrapDir, os.mkdir)\n \n for c in classes:\n p(c, joinDirGen(tv), joinScrapDir, os.mkdir) \n \n tv_fs = train_val_fs[tv][c]\n for f in tv_fs:\n dest = p(f,\n os.path.basename,\n joinDirGen(c), \n joinDirGen(tv),\n joinScrapDir)\n shutil.copyfile(f, dest)\n \n \ndef classPercentages(data_dir, by_phase = True, classes = None):\n \n if not by_phase:\n classes = os.listdir(data_dir) if classes is None else classes\n class_counts = {c: p(os.path.join(data_dir, c), os.listdir, len) for c in classes}\n n_total = sum(class_counts.values())\n \n class_percents = {c: count/n_total * 100 
for (c, count) in class_counts.items()}\n \n return dict(percent = class_percents, count = class_counts)\n \n xs = ('train', 'val')\n \n if classes is None:\n train_dir = os.path.join(data_dir, 'train')\n classes = os.listdir(train_dir)\n \n folders = {(x,c):os.path.join(data_dir, x, c) for x in xs\n for c in classes}\n \n train_val_counts = {x:sum(\n [p(folders[x, c], os.listdir, len) for c in classes])\n for x in xs}\n \n class_counts = {(x, c): p(folders[x, c], os.listdir, len)\n for c in classes for x in xs}\n \n class_percents = {xc: count/train_val_counts[xc[0]] \n for (xc, count) in class_counts.items()}\n \n return dict(percent = class_percents, count = class_counts)\n", "import pandas as pd\n\nimport analyzeModel\n\n\ndef makePredsPerformanceTable(preds_f, phase = None):\n preds = pd.read_csv(preds_f)\n perf = analyzeModel.performanceMetrics(preds)\n if phase is not None:\n perf = analyzeModel.performanceMetricsWithPhase(preds)\n perf = perf[phase]\n\n\n for k in perf.keys():\n perf[k].pop('class_counts', None)\n\n return pd.DataFrame(perf)\n", "#!/usr/bin/python\n\nimport sys\n\nimport io\nfrom PIL import Image\nfrom skimage import io\nimport torch\nfrom torch.autograd import Variable\nfrom torchvision import datasets, models, transforms\n\nimport localResnet\n\nf_name = str(sys.argv[1])\n\nprint(f_name)\n\ndevice = torch.device(\"cuda\")\nn_classes = 10\nmodel = localResnet.ResNet([2, 2, 2], n_classes, in_channels=32).to(device)\nmodel.load_state_dict(torch.load('model_final.pt'))\nmodel.eval()\n\n\ndefaultMn = [0.485, 0.456, 0.406]\ndefaultSd = [0.229, 0.224, 0.225]\n\ntform = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(defaultMn, defaultSd)\n])\n\nclasses = ['gossiping',\n 'isolation',\n 'laughing',\n 'nonbullying',\n 'pullinghair',\n 'punching',\n 'quarrel',\n 'slapping',\n 'stabbing',\n 'strangle']\n\nimg = tform(Image.open(f_name)).to(device)\nimg.unsqueeze_(0)\n_, pred = torch.max(model(img), 1)\nprint(classes[pred])\n" ]
[ [ "torch.max", "torch.sum", "numpy.log10", "torch.set_grad_enabled", "torch.no_grad" ], [ "numpy.random.choice" ], [ "pandas.read_csv", "pandas.DataFrame" ], [ "torch.device", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MrThiago/FaceForensics
[ "a815daa9ebb7c12240a4b7162c431af0e1b959fa" ]
[ "dataset/DeepFakes/faceswap-master/lib/training_data.py" ]
[ "import cv2\nimport numpy\nfrom random import shuffle\n\nfrom .utils import BackgroundGenerator\nfrom .umeyama import umeyama\n\nclass TrainingDataGenerator():\n def __init__(self, random_transform_args, coverage, scale=5, zoom=1): #TODO thos default should stay in the warp function\n self.random_transform_args = random_transform_args\n self.coverage = coverage\n self.scale = scale\n self.zoom = zoom\n\n def minibatchAB(self, images, batchsize):\n batch = BackgroundGenerator(self.minibatch(images, batchsize), 1)\n for ep1, warped_img, target_img in batch.iterator():\n yield ep1, warped_img, target_img\n\n # A generator function that yields epoch, batchsize of warped_img and batchsize of target_img\n def minibatch(self, data, batchsize):\n length = len(data)\n assert length >= batchsize, \"Number of images is lower than batch-size (Note that too few images may lead to bad training). # images: {}, batch-size: {}\".format(length, batchsize)\n epoch = i = 0\n shuffle(data)\n while True:\n size = batchsize\n if i+size > length:\n shuffle(data)\n i = 0\n epoch+=1\n rtn = numpy.float32([self.read_image(img) for img in data[i:i+size]])\n i+=size\n yield epoch, rtn[:,0,:,:,:], rtn[:,1,:,:,:] \n\n def color_adjust(self, img):\n return img / 255.0\n \n def read_image(self, fn):\n try:\n image = self.color_adjust(cv2.imread(fn))\n except TypeError:\n raise Exception(\"Error while reading image\", fn)\n \n image = cv2.resize(image, (256,256))\n image = self.random_transform( image, **self.random_transform_args )\n warped_img, target_img = self.random_warp( image, self.coverage, self.scale, self.zoom )\n \n return warped_img, target_img\n\n def random_transform(self, image, rotation_range, zoom_range, shift_range, random_flip):\n h, w = image.shape[0:2]\n rotation = numpy.random.uniform(-rotation_range, rotation_range)\n scale = numpy.random.uniform(1 - zoom_range, 1 + zoom_range)\n tx = numpy.random.uniform(-shift_range, shift_range) * w\n ty = numpy.random.uniform(-shift_range, shift_range) * h\n mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)\n mat[:, 2] += (tx, ty)\n result = cv2.warpAffine(\n image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE)\n if numpy.random.random() < random_flip:\n result = result[:, ::-1]\n return result\n\n # get pair of random warped images from aligned face image\n def random_warp(self, image, coverage, scale = 5, zoom = 1):\n assert image.shape == (256, 256, 3)\n range_ = numpy.linspace(128 - coverage//2, 128 + coverage//2, 5)\n mapx = numpy.broadcast_to(range_, (5, 5))\n mapy = mapx.T\n\n mapx = mapx + numpy.random.normal(size=(5,5), scale=scale)\n mapy = mapy + numpy.random.normal(size=(5,5), scale=scale)\n\n interp_mapx = cv2.resize(mapx, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32')\n interp_mapy = cv2.resize(mapy, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32')\n\n warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR)\n\n src_points = numpy.stack([mapx.ravel(), mapy.ravel() ], axis=-1)\n dst_points = numpy.mgrid[0:65*zoom:16*zoom,0:65*zoom:16*zoom].T.reshape(-1,2)\n mat = umeyama(src_points, dst_points, True)[0:2]\n\n target_image = cv2.warpAffine(image, mat, (64*zoom,64*zoom))\n\n return warped_image, target_image\n\ndef stack_images(images):\n def get_transpose_axes(n):\n if n % 2 == 0:\n y_axes = list(range(1, n - 1, 2))\n x_axes = list(range(0, n - 1, 2))\n else:\n y_axes = list(range(0, n - 1, 2))\n x_axes = list(range(1, n - 1, 2))\n return y_axes, x_axes, [n - 1]\n \n 
images_shape = numpy.array(images.shape)\n new_axes = get_transpose_axes(len(images_shape))\n new_shape = [numpy.prod(images_shape[x]) for x in new_axes]\n return numpy.transpose(\n images,\n axes=numpy.concatenate(new_axes)\n ).reshape(new_shape)\n" ]
[ [ "numpy.random.random", "numpy.linspace", "numpy.concatenate", "numpy.random.normal", "numpy.broadcast_to", "numpy.prod", "numpy.random.uniform", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pseudowasabi/computer-vision-exercises
[ "34b7c8402c32dbb00e484f90780ebb6546a3f8dc", "34b7c8402c32dbb00e484f90780ebb6546a3f8dc" ]
[ "CV_A4_/A4_compute_descriptors.py", "CV_A1_/A1_corner_detection.py" ]
[ "'''\nComputer vision assignment 4 by Yoseob Kim\nA4_compute_descriptors.py\nCompute similarity-reflected image descriptors with L1, L2 norm distances by using SIFT descriptors.\n\n* Status: (working on it)\n* GitHub Link: https://github.com/pseudowasabi/computer-vision-exercises/tree/master/CV_A4_\n'''\n\nimport cv2\nimport numpy as np\nimport math\nimport time\nimport operator\nimport random\n\nimg = cv2.imread('ukbench00000.jpg', cv2.IMREAD_GRAYSCALE)\n\n'''\nmy_min = np.inf\nmy_max = 0'''\nfor i in range(1000):\n offset = '00' if i < 10 else '0' if i < 100 else ''\n offset += str(i)\n #print(offset)\n\n f = open('./sift/sift100'+offset, 'rb')\n\n # reference - https://numpy.org/doc/stable/reference/generated/numpy.frombuffer.html\n sift_des = np.frombuffer(f.read(), dtype=np.uint8)\n #print(sift_des.shape)\n #print(sift_des)\n\n '''\n if sift_des.shape[0] % 128 != 0:\n print('divide error')\n '''\n sift_des_reshaped = np.reshape(sift_des, (sift_des.shape[0] // 128, 128))\n #print(sift_des_reshaped.shape)\n\n '''\n if sift_des_reshaped.shape[0] < my_min:\n my_min = sift_des_reshaped.shape[0]\n if sift_des_reshaped.shape[0] > my_max:\n my_max = sift_des_reshaped.shape[0]'''\n\n f.close()\n\n\n#print(my_min, my_max)\n# N size\n# min = 73, max = 2388\n\n\n\n\n", "'''\nComputer vision assignment 1 by Yoseob Kim\nA1_corner_detection.py\nImplementation of Harris corner detector.\nGitHub Link: https://github.com/pseudowasabi/computer-vision-exercises/tree/master/CV_A1_\n'''\n\nimport cv2\nimport numpy as np\nimport math\nimport time\nimport operator\nimport os\nfrom filtering_by_yoseob import *\n\n## ** initial settings to make result directory.\n## Reference - https://www.geeksforgeeks.org/python-os-makedirs-method/?ref=lbp\ntry:\n os.makedirs('result', exist_ok=True)\nexcept OSError as error:\n print(\"[NOTICE!] '/result' directory cannot be created.\")\n print(\"Please CREATE the DIRECTORY MANUALLY to save created images.\")\n\n###\n# 3-2. Corner response (define function - a, b, c and d)\n###\n\ndef compute_corner_response(img):\n ## a) apply Sobel filters\n # Sobel filtering function is implemented in filtering_by_yoseob.py\n # sobel_img_x, sobel_img_y is derivatives along x and y direction respectively.\n sobel_img_x, sobel_img_y = my_sobel_filtering(img)\n\n # padded with 0 value to operate corner response\n sobel_img_x = image_padding_2d(sobel_img_x, 2, 1) # (img, padd_width, type=1)\n sobel_img_y = image_padding_2d(sobel_img_y, 2, 1) # type=1 means just padd with zeros.\n\n ## b) compute second moment matrix M\n uni_window = np.ones((5, 5))\n patch_img_x = np.zeros((5, 5))\n patch_img_y = np.zeros((5, 5))\n size0 = img.shape[0]\n size1 = img.shape[1]\n\n ## c) variables for computing corner responses\n R = np.zeros((size0, size1))\n _k = 0.04\n _max_val = 0\n\n elapsed_ = list(range(0, size0, size0 // 20))\n for x in range(size0):\n for y in range(size1):\n # i. 
subtract mean of each image patch\n _sum_x = 0.\n _sum_y = 0.\n for i in range(x, x+5):\n for j in range(y, y+5):\n patch_img_x[i-x][j-y] = sobel_img_x[i][j]\n _sum_x = operator.__add__(_sum_x, sobel_img_x[i][j])\n patch_img_y[i-x][j-y] = sobel_img_y[i][j]\n _sum_y = operator.__add__(_sum_y, sobel_img_y[i][j])\n _avg_x = operator.__truediv__(_sum_x, 25)\n _avg_y = operator.__truediv__(_sum_y, 25)\n\n sum_of_ix_ix = 0.\n sum_of_ix_iy = 0.\n sum_of_iy_iy = 0.\n for i in range(5):\n for j in range(5):\n patch_img_x[i][j] = operator.__sub__(patch_img_x[i][j], _avg_x)\n patch_img_y[i][j] = operator.__sub__(patch_img_y[i][j], _avg_y)\n\n sum_of_ix_ix = operator.__add__(sum_of_ix_ix, operator.__mul__(patch_img_x[i][j], patch_img_x[i][j]))\n sum_of_ix_iy = operator.__add__(sum_of_ix_iy, operator.__mul__(patch_img_x[i][j], patch_img_y[i][j]))\n sum_of_iy_iy = operator.__add__(sum_of_iy_iy, operator.__mul__(patch_img_y[i][j], patch_img_y[i][j]))\n\n # ii. get second moment matrix\n # since we use uniform window, just calculated the summation of ix_ix, ix_iy, iy_iy respectively (above).\n M = np.array([[sum_of_ix_ix, sum_of_ix_iy], [sum_of_ix_iy, sum_of_iy_iy]])\n eigenvalues, _ = np.linalg.eig(M)\n #print(eigen_values)\n\n e1 = eigenvalues[0]\n e2 = eigenvalues[1]\n R[x][y] = e1 * e2 - _k * ((e1 + e2) ** 2)\n\n ## d) normalize responses\n # i. negative values to 0, otherwise normalize to range [0, 1]\n if R[x][y] < 0:\n R[x][y] = 0\n if R[x][y] > _max_val:\n _max_val = R[x][y]\n\n if x in elapsed_:\n print('.', end='')\n\n #print(\"_max_val:\", _max_val)\n #normalizer = 1.\n #if _max_val != 0:\n # normalizer = 1 / _max_val\n #normalizer = 1 / 255\n\n #normalizer = 1 / np.linalg.norm(R)\n for x in range(size0):\n for y in range(size1):\n R[x][y] = operator.__truediv__(R[x][y], _max_val)\n return R\n\n\ndef non_maximum_suppression_win(R, winSize):\n ## c) compute local maximas by NMS\n #* input argument R is already thresholded in corner_response_embedding func.\n x_bound = R.shape[0] - winSize + 1\n y_bound = R.shape[1] - winSize + 1\n\n # elapsed_ = list(range(0, R.shape[0], R.shape[1] // 20))\n for x in range(0, x_bound, winSize // 2):\n for y in range(0, y_bound, winSize // 2):\n local_maxima = R[x][y]\n lm_x = x\n lm_y = y\n for i in range(x, x + winSize):\n for j in range(y, y + winSize):\n if R[i][j] > local_maxima:\n local_maxima = R[i][j]\n lm_x = i\n lm_y = j\n R[i][j] = 0\n # print(x, y, local_maxima, lm_x, lm_y)\n R[lm_x][lm_y] = local_maxima\n #if x in elapsed_:\n # print('.', end='')\n return R\n\n\n###\n# 3-1. Gaussian filtering\n###\n\nimg_lenna = cv2.imread('./lenna.png', cv2.IMREAD_GRAYSCALE)\nimg_shapes = cv2.imread('./shapes.png', cv2.IMREAD_GRAYSCALE)\n\nprint('Computer Vision A#1 // Yoseob Kim')\nprint('Part #3. Corner Detection\\n')\n\nprint('3-1. apply gaussian filtering for shapes and lenna. (size=7, sigma=1.5)')\n\nprint('filtering... (for \"shapes.png\")', end='')\nfiltered_img_shapes = my_gaussian_filtering(img_shapes, 7, 1.5)\nprint(' ---> done.')\n\nprint('filtering... (for \"lenna.png\")', end='')\nfiltered_img_lenna = my_gaussian_filtering(img_lenna, 7, 1.5)\nprint(' ---> done.')\nprint()\nprint()\n\n###\n# 3-2. Corner response (execute requirements - ...)\n###\nprint('3-2. 
compute corner responses for shapes and lenna respectively.')\n\n# for \"shapes.png\"\nprint(' ** corner responses for \"shapes.png\" initiate.')\nprint(' [about 20 dots will be shown to be done]')\n\nstart_time = time.process_time()\nR_shapes = compute_corner_response(filtered_img_shapes)\nelapsed_time = time.process_time() - start_time\nprint(' ---> done. /elapsed time:', elapsed_time)\n\n#print(np.min(R_shapes), np.max(R_shapes))\nR_shapes = cv2.normalize(R_shapes, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\ncv2.imwrite('./result/part_3_corner_raw_shapes.png', R_shapes)\nR_shapes = cv2.normalize(R_shapes, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\nprint(' * corner responses of \"shapes.png\" saved to ./result/ directory.')\n\nprint(' ## notice: press any key (on image window) to continue. !!do not close window!!\\n')\ncv2.imshow(\"corner responses of shapes\", R_shapes)\ncv2.waitKey(0)\n\n# for \"lenna.png\"\nprint(' ** corner responses for \"lenna.png\" initiate.')\nprint(' [about 20 dots will be shown to be done]')\n\nstart_time = time.process_time()\nR_lenna = compute_corner_response(filtered_img_lenna)\nelapsed_time = time.process_time() - start_time\nprint(' ---> done. /elapsed time:', elapsed_time)\n\n#print(np.min(R_lenna), np.max(R_lenna))\nR_lenna = cv2.normalize(R_lenna, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\ncv2.imwrite('./result/part_3_corner_raw_lenna.png', R_lenna)\nR_lenna = cv2.normalize(R_lenna, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\nprint(' * corner responses of \"lenna.png\" saved to ./result/ directory.')\n\nprint(' ## notice: press any key (on image window) to continue. !!do not close window!!\\n')\ncv2.imshow(\"corner responses of lenna\", R_lenna)\ncv2.waitKey(0)\nprint()\n\n\n###\n# 3-3. Thresholding, Non-maximum suppression (execute requirements)\n###\nprint('3-3. thresholding and apply NMS to corner responses for each images.')\n\n## a) change corner response to green\n## b) show image and save\n\ndef corner_response_embedding(R, img):\n # convert gray scale to rgb channel\n normalized_img = cv2.normalize(img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n rgb_img = cv2.cvtColor(normalized_img, cv2.COLOR_GRAY2BGR)\n\n # thresholding (greater than 0.1)\n for i in range(R.shape[0]):\n for j in range(R.shape[1]):\n if R[i][j] > 0.1:\n cv2.circle(rgb_img, (j, i), 5, (0, 1.0, 0), 1)\n else:\n R[i][j] = 0\n\n return R, rgb_img\n\nprint('a, b) thresholding and corner response embedding to original images')\n\n# for shapes.png\n#print(np.min(img_shapes), np.max(img_shapes))\nthresholded_R_shapes, rgb_img_shapes = corner_response_embedding(R_shapes, img_shapes)\n#print(np.min(rgb_img_shapes), np.max(rgb_img_shapes))\nrgb_img_shapes = cv2.normalize(rgb_img_shapes, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\ncv2.imwrite('./result/part_3_corner_bin_shapes.png', rgb_img_shapes)\nprint(' ** corner response embedding of \"shapes.png\" saved to ./result/ directory.')\n\nprint(' ## notice: press any key (on image window) to continue. 
!!do not close window!!\\n')\ncv2.imshow(\"corner response > 0.1 :: shapes.png\", rgb_img_shapes)\ncv2.waitKey(0)\n\n# for lenna.png\n#print(np.min(img_lenna), np.max(img_lenna))\nthresholded_R_lenna, rgb_img_lenna = corner_response_embedding(R_lenna, img_lenna)\n#print(np.min(rgb_img_lenna), np.max(rgb_img_lenna))\nrgb_img_lenna = cv2.normalize(rgb_img_lenna, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\ncv2.imwrite('./result/part_3_corner_bin_lenna.png', rgb_img_lenna)\nprint(' ** corner response embedding of \"lenna.png\" saved to ./result/ directory.')\n\nprint(' ## notice: press any key (on image window) to continue. !!do not close window!!\\n')\ncv2.imshow(\"corner response > 0.1 :: lenna.png\", rgb_img_lenna)\ncv2.waitKey(0)\n#print()\n\n\n## c) nms\n## d) show image and save\n\nprint(\"c, d) apply non-maximum suppression to corner responses\")\n_winSize = 11\n\n# for \"shapes.png\"\nprint('...applying NMS to R (shapes.png)', end='')\nstart_time = time.process_time()\nsuppressed_R_shapes = non_maximum_suppression_win(thresholded_R_shapes, _winSize)\nelapsed_time = time.process_time() - start_time\nprint(' ---> done. /elapsed time:', elapsed_time)\n\n_, rgb_img_shapes2 = corner_response_embedding(suppressed_R_shapes, img_shapes)\n#print(np.min(rgb_img_shapes2), np.max(rgb_img_shapes2))\nrgb_img_shapes2 = cv2.normalize(rgb_img_shapes2, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\ncv2.imwrite('./result/part_3_corner_sup_shapes.png', rgb_img_shapes2)\nprint(' ** NMS applied R of \"shapes.png\" saved to ./result/ directory.')\n\nprint(' ## notice: press any key (on image window) to continue. !!do not close window!!\\n')\ncv2.imshow(\"non-maximum suppressed :: shapes.png\", rgb_img_shapes2)\ncv2.waitKey(0)\n\n# for \"lenna.png\"\nprint('...applying NMS to R (lenna.png)', end='')\nstart_time = time.process_time()\nsuppressed_R_lenna = non_maximum_suppression_win(thresholded_R_lenna, _winSize)\nelapsed_time = time.process_time() - start_time\nprint(' ---> done. /elapsed time:', elapsed_time)\n\n_, rgb_img_lenna2 = corner_response_embedding(suppressed_R_lenna, img_lenna)\n#print(np.min(rgb_img_lenna2), np.max(rgb_img_lenna2))\nrgb_img_lenna2 = cv2.normalize(rgb_img_lenna2, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\ncv2.imwrite('./result/part_3_corner_sup_lenna.png', rgb_img_lenna2)\nprint(' ** NMS applied R of \"lenna.png\" saved to ./result/ directory.')\n\nprint(' ## notice: P#3 done. press any key (on image window) to finish.\\n')\ncv2.imshow(\"non-maximum suppressed :: lenna.png\", rgb_img_lenna2)\ncv2.waitKey(0)\n\n\ncv2.destroyAllWindows()" ]
[ [ "numpy.reshape" ], [ "numpy.linalg.eig", "numpy.array", "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Advestis/adnmtf
[ "7b36da64669894506071a75d8bd341edb0e75b9f" ]
[ "adnmtf/nmtf_core.py" ]
[ "\"\"\"Non-negative matrix and tensor factorization core functions\n\n\"\"\"\n\n# Author: Paul Fogel\n\n# License: MIT\n# Jan 4, '20\nfrom typing import Tuple\n\nimport numpy as np\nfrom .nmtf_utils import EPSILON, sparse_opt\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n# TODO (pcotte): typing\n# TODO (pcotte): docstrings (with parameters and returns)\n\n\ndef ntf_stack(m, mmis, n_blocks):\n \"\"\"Unfold tensor M\n for future use with NMF\n \"\"\"\n n, p = m.shape\n mmis = mmis.astype(np.int)\n n_mmis = mmis.shape[0]\n n_blocks = int(n_blocks)\n\n mstacked = np.zeros((int(n * p / n_blocks), n_blocks))\n if n_mmis > 0:\n mmis_stacked = np.zeros((int(n * p / n_blocks), n_blocks))\n else:\n mmis_stacked = np.array([])\n\n for i_block in range(0, n_blocks):\n for j in range(0, int(p / n_blocks)):\n i1 = j * n\n i2 = i1 + n\n mstacked[i1:i2, i_block] = m[:, int(i_block * p / n_blocks + j)]\n if n_mmis > 0:\n mmis_stacked[i1:i2, i_block] = mmis[:, int(i_block * p / n_blocks + j)]\n\n return mstacked, mmis_stacked\n\n\ndef ntf_solve(\n m,\n mmis,\n mt0,\n mw0,\n mb0,\n nc,\n tolerance,\n log_iter,\n status0,\n max_iterations,\n nmf_fix_user_lhe,\n nmf_fix_user_rhe,\n nmf_fix_user_bhe,\n nmf_sparse_level,\n ntf_unimodal,\n ntf_smooth,\n ntf_left_components,\n ntf_right_components,\n ntf_block_components,\n n_blocks,\n nmf_priors,\n my_status_box,\n):\n \"\"\"Interface to:\n - NTFSolve_simple\n \"\"\"\n\n if len(nmf_priors) > 0:\n n_nmf_priors, nc = nmf_priors.shape\n else:\n n_nmf_priors = 0\n\n if n_nmf_priors > 0:\n nmf_priors[nmf_priors > 0] = 1\n\n return ntf_solve_simple(\n m=m,\n mmis=mmis,\n mt0=mt0,\n mw0=mw0,\n mb0=mb0,\n nc=nc,\n tolerance=tolerance,\n log_iter=log_iter,\n status0=status0,\n max_iterations=max_iterations,\n nmf_fix_user_lhe=nmf_fix_user_lhe,\n nmf_fix_user_rhe=nmf_fix_user_rhe,\n nmf_fix_user_bhe=nmf_fix_user_bhe,\n nmf_sparse_level=nmf_sparse_level,\n ntf_unimodal=ntf_unimodal,\n ntf_smooth=ntf_smooth,\n ntf_left_components=ntf_left_components,\n ntf_right_components=ntf_right_components,\n ntf_block_components=ntf_block_components,\n n_blocks=n_blocks,\n nmf_priors=nmf_priors,\n my_status_box=my_status_box,\n )\n\n\ndef ntf_solve_simple(\n m,\n mmis,\n mt0,\n mw0,\n mb0,\n nc,\n tolerance,\n log_iter,\n status0,\n max_iterations,\n nmf_fix_user_lhe,\n nmf_fix_user_rhe,\n nmf_fix_user_bhe,\n nmf_sparse_level,\n ntf_unimodal,\n ntf_smooth,\n ntf_left_components,\n ntf_right_components,\n ntf_block_components,\n n_blocks,\n nmf_priors,\n my_status_box,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, float, int]:\n \"\"\"\n Estimate NTF matrices (HALS)\n\n Parameters\n ----------\n m: Input matrix\n mmis: Define missing values (0 = missing cell, 1 = real cell)\n mt0: Initial left hand matrix\n mw0: Initial right hand matrix\n mb0: Initial block hand matrix\n nc: NTF rank\n tolerance: Convergence threshold\n log_iter: Log results through iterations\n status0: Initial displayed status to be updated during iterations\n max_iterations: Max iterations\n nmf_fix_user_lhe: = 1 => fixed left hand matrix columns\n nmf_fix_user_rhe: = 1 => fixed right hand matrix columns\n nmf_fix_user_bhe: = 1 => fixed block hand matrix columns\n nmf_sparse_level: sparsity level (as defined by Hoyer); +/- = make RHE/LHe sparse\n ntf_unimodal: Apply Unimodal constraint on factoring vectors\n ntf_smooth: Apply Smooth constraint on factoring vectors\n ntf_left_components: Apply Unimodal/Smooth constraint on left hand matrix\n ntf_right_components: Apply Unimodal/Smooth 
constraint on right hand matrix\n ntf_block_components: Apply Unimodal/Smooth constraint on block hand matrix\n n_blocks: Number of NTF blocks\n nmf_priors: Elements in mw that should be updated (others remain 0)\n my_status_box\n\n Returns\n -------\n Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, float, int]\\n\n * mt: Left hand matrix\\n\n * mw: Right hand matrix\\n\n * mb: Block hand matrix\\n\n * diff: objective cost\\n\n * cancel_pressed\\n\n\n Reference\n ---------\n a. Cichocki, P.H.a.N. Anh-Huym, Fast local algorithms for large scale nonnegative matrix and tensor factorizations,\n IEICE Trans. Fundam. Electron. Commun. Comput. Sci. 92 (3) (2009) 708–721.\n \"\"\"\n\n cancel_pressed = 0\n\n n, p0 = m.shape\n n_mmis = mmis.shape[0]\n nc = int(nc)\n n_blocks = int(n_blocks)\n p = int(p0 / n_blocks)\n nxp = int(n * p)\n nxp0 = int(n * p0)\n mt = np.copy(mt0)\n mw = np.copy(mw0)\n mb = np.copy(mb0)\n # step_iter = math.ceil(MaxIterations/10)\n step_iter = 1\n pbar_step = 100 * step_iter / max_iterations\n\n id_blockp = np.arange(0, (n_blocks - 1) * p + 1, p)\n a = np.zeros(n)\n b = np.zeros(p)\n c = np.zeros(n_blocks)\n alpha = np.zeros(nc)\n\n # Compute Residual tensor\n mfit = np.zeros((n, p0))\n for k in range(0, nc):\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mfit[:, id_blockp[i_block]: id_blockp[i_block] + p] += (\n mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n )\n else:\n mfit[:, id_blockp[0]: id_blockp[0] + p] += np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n\n denomt = np.zeros(n)\n denomw = np.zeros(p)\n denom_block = np.zeros((n_blocks, nc))\n mt2 = np.zeros(n)\n mw2 = np.zeros(p)\n mt_mw = np.zeros(nxp)\n denom_cutoff = 0.1\n\n if n_mmis > 0:\n mres = (m - mfit) * mmis\n else:\n mres = m - mfit\n\n my_status_box.init_bar()\n\n # Loop\n cont = 1\n i_iter = 0\n diff0 = 1.0e99\n mpart = np.zeros((n, p0))\n if abs(nmf_sparse_level) < 1:\n alpha[0] = nmf_sparse_level * 0.8\n else:\n alpha[0] = nmf_sparse_level\n\n percent_zeros = 0\n iter_sparse = 0\n\n while (cont > 0) & (i_iter < max_iterations):\n for k in range(0, nc):\n (\n n_blocks,\n mpart,\n id_blockp,\n p,\n mb,\n k,\n mt,\n n,\n mw,\n n_mmis,\n mmis,\n mres,\n nmf_fix_user_lhe,\n denomt,\n mw2,\n denom_cutoff,\n alpha,\n ntf_unimodal,\n ntf_left_components,\n ntf_smooth,\n a,\n nmf_fix_user_rhe,\n denomw,\n mt2,\n ntf_right_components,\n b,\n nmf_fix_user_bhe,\n mt_mw,\n nxp,\n denom_block,\n ntf_block_components,\n c,\n mfit,\n nmf_priors,\n ) = ntf_update(\n n_blocks=n_blocks,\n mpart=mpart,\n id_blockp=id_blockp,\n p=p,\n mb=mb,\n k=k,\n mt=mt,\n n=n,\n mw=mw,\n n_mmis=n_mmis,\n mmis=mmis,\n mres=mres,\n nmf_fix_user_lhe=nmf_fix_user_lhe,\n denomt=denomt,\n mw2=mw2,\n denom_cutoff=denom_cutoff,\n alpha=alpha,\n ntf_unimodal=ntf_unimodal,\n ntf_left_components=ntf_left_components,\n ntf_smooth=ntf_smooth,\n a=a,\n nmf_fix_user_rhe=nmf_fix_user_rhe,\n denomw=denomw,\n mt2=mt2,\n ntf_right_components=ntf_right_components,\n b=b,\n nmf_fix_user_bhe=nmf_fix_user_bhe,\n mt_mw=mt_mw,\n nxp=nxp,\n denom_block=denom_block,\n ntf_block_components=ntf_block_components,\n c=c,\n mfit=mfit,\n nmf_priors=nmf_priors,\n )\n\n if i_iter % step_iter == 0:\n # Check convergence\n diff = np.linalg.norm(mres) ** 2 / nxp0\n if (diff0 - diff) / diff0 < tolerance:\n cont = 0\n else:\n if diff > diff0:\n my_status_box.my_print(f\"{status0} Iter: {i_iter} MSR does not improve\")\n\n diff0 = diff\n\n Status = f\"{status0} Iteration: {i_iter}\"\n\n if nmf_sparse_level != 0:\n 
Status = f\"{Status} ; Achieved sparsity: {round(percent_zeros, 2)}; alpha: {round(alpha[0], 2)}\"\n if log_iter == 1:\n my_status_box.my_print(Status)\n\n my_status_box.update_status(status=Status)\n my_status_box.update_bar(step=pbar_step)\n if my_status_box.cancel_pressed:\n cancel_pressed = 1\n return np.array([]), mt, mw, mb, mres, cancel_pressed\n\n if log_iter == 1:\n my_status_box.my_print(status0 + \" Iter: \" + str(i_iter) + \" MSR: \" + str(diff))\n\n i_iter += 1\n\n if cont == 0 or i_iter == max_iterations or (cont == 0 and abs(nmf_sparse_level) == 1):\n if 0 < nmf_sparse_level < 1:\n sparse_test = np.zeros((nc, 1))\n percent_zeros0 = percent_zeros\n for k in range(0, nc):\n sparse_test[k] = np.where(mw[:, k] == 0)[0].size\n\n percent_zeros = np.mean(sparse_test) / p\n if percent_zeros < percent_zeros0:\n iter_sparse += 1\n else:\n iter_sparse = 0\n\n if (percent_zeros < 0.99 * nmf_sparse_level) & (iter_sparse < 50):\n alpha[0] *= min(1.05 * nmf_sparse_level / percent_zeros, 1.1)\n if alpha[0] < 1:\n i_iter = 0\n cont = 1\n\n elif 0 > nmf_sparse_level > -1:\n sparse_test = np.zeros((nc, 1))\n percent_zeros0 = percent_zeros\n for k in range(0, nc):\n sparse_test[k] = np.where(mt[:, k] == 0)[0].size\n\n percent_zeros = np.mean(sparse_test) / n\n if percent_zeros < percent_zeros0:\n iter_sparse += 1\n else:\n iter_sparse = 0\n\n if (percent_zeros < 0.99 * abs(nmf_sparse_level)) & (iter_sparse < 50):\n alpha[0] *= min(1.05 * abs(nmf_sparse_level) / percent_zeros, 1.1)\n if abs(alpha[0]) < 1:\n i_iter = 0\n cont = 1\n\n elif abs(alpha[0]) == 1:\n if alpha[0] == -1:\n for k in range(0, nc):\n if np.max(mt[:, k]) > 0:\n hhi = int(\n np.round(\n (np.linalg.norm(mt[:, k], ord=1) / (np.linalg.norm(mt[:, k], ord=2) + EPSILON))\n ** 2,\n decimals=0,\n )\n )\n alpha[k] = -1 - (n - hhi) / (n - 1)\n else:\n alpha[k] = 0\n else:\n for k in range(0, nc):\n if np.max(mw[:, k]) > 0:\n hhi = int(\n np.round(\n (np.linalg.norm(mw[:, k], ord=1) / (np.linalg.norm(mw[:, k], ord=2) + EPSILON))\n ** 2,\n decimals=0,\n )\n )\n alpha[k] = 1 + (p - hhi) / (p - 1)\n else:\n alpha[k] = 0\n\n if alpha[0] <= -1:\n alpha_real = -(alpha + 1)\n # noinspection PyTypeChecker\n alpha_min = min(alpha_real)\n for k in range(0, nc):\n # noinspection PyUnresolvedReferences\n alpha[k] = min(alpha_real[k], 2 * alpha_min)\n alpha[k] = -alpha[k] - 1\n else:\n alpha_real = alpha - 1\n alpha_min = min(alpha_real)\n for k in range(0, nc):\n alpha[k] = min(alpha_real[k], 2 * alpha_min)\n alpha[k] = alpha[k] + 1\n\n i_iter = 0\n cont = 1\n diff0 = 1.0e99\n\n for k in range(0, nc):\n hhi = np.round((np.linalg.norm(mt[:, k], ord=1) / np.linalg.norm(mt[:, k], ord=2)) ** 2, decimals=0)\n logger.info(f\"component: {k}, left hhi: {hhi}\")\n hhi = np.round((np.linalg.norm(mw[:, k], ord=1) / np.linalg.norm(mw[:, k], ord=2)) ** 2, decimals=0)\n logger.info(f\"component: {k} right hhi: {hhi}\")\n\n if (n_mmis > 0) & (nmf_fix_user_bhe == 0):\n mb *= denom_block\n\n # TODO (pcotte): mt and mw can be not yet referenced: fix that\n return np.array([]), mt, mw, mb, diff, cancel_pressed\n\n\ndef ntf_update(\n n_blocks,\n mpart,\n id_blockp,\n p,\n mb,\n k,\n mt,\n n,\n mw,\n n_mmis,\n mmis,\n mres,\n nmf_fix_user_lhe,\n denomt,\n mw2,\n denom_cutoff,\n alpha,\n ntf_unimodal,\n ntf_left_components,\n ntf_smooth,\n a,\n nmf_fix_user_rhe,\n denomw,\n mt2,\n ntf_right_components,\n b,\n nmf_fix_user_bhe,\n mt_mw,\n nxp,\n denom_block,\n ntf_block_components,\n c,\n mfit,\n nmf_priors,\n):\n \"\"\"Core updating code called by NTFSolve_simple & NTF 
Solve_conv\n Input:\n All variables in the calling function used in the function\n Output:\n Same as Input\n \"\"\"\n\n if len(nmf_priors) > 0:\n n_nmf_priors, nc = nmf_priors.shape\n else:\n n_nmf_priors = 0\n\n # Compute kth-part\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mpart[:, id_blockp[i_block]: id_blockp[i_block] + p] = (\n mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n )\n else:\n mpart[:, id_blockp[0]: id_blockp[0] + p] = np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n\n if n_mmis > 0:\n mpart *= mmis\n\n mpart += mres\n\n if nmf_fix_user_bhe > 0:\n norm_bhe = True\n if nmf_fix_user_rhe == 0:\n norm_lhe = True\n norm_rhe = False\n else:\n norm_lhe = False\n norm_rhe = True\n else:\n norm_bhe = False\n norm_lhe = True\n norm_rhe = True\n\n if (nmf_fix_user_lhe > 0) & norm_lhe:\n norm = np.linalg.norm(mt[:, k])\n if norm > 0:\n mt[:, k] /= norm\n\n if (nmf_fix_user_rhe > 0) & norm_rhe:\n norm = np.linalg.norm(mw[:, k])\n if norm > 0:\n mw[:, k] /= norm\n\n if (nmf_fix_user_bhe > 0) & norm_bhe & (n_blocks > 1):\n norm = np.linalg.norm(mb[:, k])\n if norm > 0:\n mb[:, k] /= norm\n\n if nmf_fix_user_lhe == 0:\n # Update Mt\n mt[:, k] = 0\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mt[:, k] += mb[i_block, k] * mpart[:, id_blockp[i_block]: id_blockp[i_block] + p] @ mw[:, k]\n else:\n mt[:, k] += mpart[:, id_blockp[0]: id_blockp[0] + p] @ mw[:, k]\n\n if n_mmis > 0:\n denomt[:] = 0\n mw2[:] = mw[:, k] ** 2\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n # Broadcast missing cells into Mw to calculate Mw.T * Mw\n denomt += mb[i_block, k] ** 2 * mmis[:, id_blockp[i_block]: id_blockp[i_block] + p] @ mw2\n else:\n denomt += mmis[:, id_blockp[0]: id_blockp[0] + p] @ mw2\n\n denomt /= np.max(denomt)\n denomt[denomt < denom_cutoff] = denom_cutoff\n mt[:, k] /= denomt\n\n mt[mt[:, k] < 0, k] = 0\n if alpha[0] < 0:\n if alpha[0] <= -1:\n if (alpha[0] == -1) & (np.max(mt[:, k]) > 0):\n t_threshold = mt[:, k]\n hhi = int(\n np.round(\n (np.linalg.norm(t_threshold, ord=1) / (np.linalg.norm(t_threshold, ord=2) + EPSILON)) ** 2,\n decimals=0,\n )\n )\n t_rank = np.argsort(t_threshold)\n t_threshold[t_rank[0: n - hhi]] = 0\n else:\n mt[:, k] = sparse_opt(mt[:, k], -alpha[k] - 1, False)\n else:\n mt[:, k] = sparse_opt(mt[:, k], -alpha[0], False)\n\n if (ntf_unimodal > 0) & (ntf_left_components > 0):\n # Enforce unimodal distribution\n tmax = np.argmax(mt[:, k])\n for i in range(tmax + 1, n):\n mt[i, k] = min(mt[i - 1, k], mt[i, k])\n\n for i in range(tmax - 1, -1, -1):\n mt[i, k] = min(mt[i + 1, k], mt[i, k])\n\n if (ntf_smooth > 0) & (ntf_left_components > 0):\n # Smooth distribution\n a[0] = 0.75 * mt[0, k] + 0.25 * mt[1, k]\n a[n - 1] = 0.25 * mt[n - 2, k] + 0.75 * mt[n - 1, k]\n for i in range(1, n - 1):\n a[i] = 0.25 * mt[i - 1, k] + 0.5 * mt[i, k] + 0.25 * mt[i + 1, k]\n\n mt[:, k] = a\n\n if norm_lhe:\n norm = np.linalg.norm(mt[:, k])\n if norm > 0:\n mt[:, k] /= norm\n\n if nmf_fix_user_rhe == 0:\n # Update Mw\n mw[:, k] = 0\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mw[:, k] += mpart[:, id_blockp[i_block]: id_blockp[i_block] + p].T @ mt[:, k] * mb[i_block, k]\n else:\n mw[:, k] += mpart[:, id_blockp[0]: id_blockp[0] + p].T @ mt[:, k]\n\n if n_mmis > 0:\n denomw[:] = 0\n mt2[:] = mt[:, k] ** 2\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n # Broadcast missing cells into Mw to calculate Mt.T * Mt\n denomw += mb[i_block, k] ** 2 * mmis[:, id_blockp[i_block]: id_blockp[i_block] + p].T @ 
mt2\n else:\n denomw += mmis[:, id_blockp[0]: id_blockp[0] + p].T @ mt2\n\n denomw /= np.max(denomw)\n denomw[denomw < denom_cutoff] = denom_cutoff\n mw[:, k] /= denomw\n\n mw[mw[:, k] < 0, k] = 0\n\n if alpha[0] > 0:\n if alpha[0] >= 1:\n if (alpha[0] == 1) & (np.max(mw[:, k]) > 0):\n w_threshold = mw[:, k]\n hhi = int(\n np.round(\n (np.linalg.norm(w_threshold, ord=1) / (np.linalg.norm(w_threshold, ord=2) + EPSILON)) ** 2,\n decimals=0,\n )\n )\n w_rank = np.argsort(w_threshold)\n w_threshold[w_rank[0: p - hhi]] = 0\n else:\n mw[:, k] = sparse_opt(mw[:, k], alpha[k] - 1, False)\n else:\n mw[:, k] = sparse_opt(mw[:, k], alpha[0], False)\n\n if (ntf_unimodal > 0) & (ntf_right_components > 0):\n # Enforce unimodal distribution\n wmax = np.argmax(mw[:, k])\n for j in range(wmax + 1, p):\n mw[j, k] = min(mw[j - 1, k], mw[j, k])\n\n for j in range(wmax - 1, -1, -1):\n mw[j, k] = min(mw[j + 1, k], mw[j, k])\n\n if (ntf_smooth > 0) & (ntf_right_components > 0):\n # Smooth distribution\n b[0] = 0.75 * mw[0, k] + 0.25 * mw[1, k]\n b[p - 1] = 0.25 * mw[p - 2, k] + 0.75 * mw[p - 1, k]\n for j in range(1, p - 1):\n b[j] = 0.25 * mw[j - 1, k] + 0.5 * mw[j, k] + 0.25 * mw[j + 1, k]\n\n mw[:, k] = b\n\n if n_nmf_priors > 0:\n mw[:, k] = mw[:, k] * nmf_priors[:, k]\n\n if norm_rhe:\n norm = np.linalg.norm(mw[:, k])\n if norm > 0:\n mw[:, k] /= norm\n\n if nmf_fix_user_bhe == 0:\n # Update Mb\n mb[:, k] = 0\n mt_mw[:] = np.reshape((np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))), nxp)\n\n for i_block in range(0, n_blocks):\n mb[i_block, k] = np.reshape(mpart[:, id_blockp[i_block]: id_blockp[i_block] + p], nxp).T @ mt_mw\n\n if n_mmis > 0:\n mt_mw[:] = mt_mw[:] ** 2\n for i_block in range(0, n_blocks):\n # Broadcast missing cells into Mb to calculate Mb.T * Mb\n denom_block[i_block, k] = (\n np.reshape(mmis[:, id_blockp[i_block]: id_blockp[i_block] + p], (1, nxp)) @ mt_mw\n )\n\n maxdenom_block = np.max(denom_block[:, k])\n denom_block[denom_block[:, k] < denom_cutoff * maxdenom_block] = denom_cutoff * maxdenom_block\n mb[:, k] /= denom_block[:, k]\n\n mb[mb[:, k] < 0, k] = 0\n\n if (ntf_unimodal > 0) & (ntf_block_components > 0):\n # Enforce unimodal distribution\n bmax = np.argmax(mb[:, k])\n for i_block in range(bmax + 1, n_blocks):\n mb[i_block, k] = min(mb[i_block - 1, k], mb[i_block, k])\n\n for i_block in range(bmax - 1, -1, -1):\n mb[i_block, k] = min(mb[i_block + 1, k], mb[i_block, k])\n\n if (ntf_smooth > 0) & (ntf_block_components > 0):\n # Smooth distribution\n c[0] = 0.75 * mb[0, k] + 0.25 * mb[1, k]\n c[n_blocks - 1] = 0.25 * mb[n_blocks - 2, k] + 0.75 * mb[n_blocks - 1, k]\n for i_block in range(1, n_blocks - 1):\n c[i_block] = 0.25 * mb[i_block - 1, k] + 0.5 * mb[i_block, k] + 0.25 * mb[i_block + 1, k]\n\n mb[:, k] = c\n\n if norm_bhe:\n norm = np.linalg.norm(mb[:, k])\n if norm > 0:\n mb[:, k] /= norm\n\n # Update residual tensor\n mfit[:, :] = 0\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mfit[:, id_blockp[i_block]: id_blockp[i_block] + p] += (\n mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n )\n else:\n mfit[:, id_blockp[0]: id_blockp[0] + p] += np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n\n if n_mmis > 0:\n mres[:, :] = (mpart - mfit) * mmis\n else:\n mres[:, :] = mpart - mfit\n\n return (\n n_blocks,\n mpart,\n id_blockp,\n p,\n mb,\n k,\n mt,\n n,\n mw,\n n_mmis,\n mmis,\n mres,\n nmf_fix_user_lhe,\n denomt,\n mw2,\n denom_cutoff,\n alpha,\n ntf_unimodal,\n ntf_left_components,\n ntf_smooth,\n a,\n 
nmf_fix_user_rhe,\n denomw,\n mt2,\n ntf_right_components,\n b,\n nmf_fix_user_bhe,\n mt_mw,\n nxp,\n denom_block,\n ntf_block_components,\n c,\n mfit,\n nmf_priors,\n )\n" ]
[ [ "numpy.reshape", "numpy.arange", "numpy.linalg.norm", "numpy.max", "numpy.copy", "numpy.argmax", "numpy.mean", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wwt17/texar-pytorch
[ "9fb3ae8f7b541da5c808357033a93fba1817bfbd", "9fb3ae8f7b541da5c808357033a93fba1817bfbd", "9fb3ae8f7b541da5c808357033a93fba1817bfbd", "9fb3ae8f7b541da5c808357033a93fba1817bfbd" ]
[ "texar/torch/modules/decoders/rnn_decoders_test.py", "examples/gpt-2/gpt2_train_main.py", "texar/torch/modules/classifiers/bert_classifier_test.py", "texar/torch/modules/decoders/transformer_decoders.py" ]
[ "\"\"\"\nUnit tests for RNN decoders.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom texar.torch.hyperparams import HParams\nfrom texar.torch.modules.decoders.decoder_helpers import get_helper\nfrom texar.torch.modules.decoders.rnn_decoders import (\n AttentionRNNDecoder, AttentionRNNDecoderOutput, BasicRNNDecoder,\n BasicRNNDecoderOutput)\nfrom texar.torch.modules.embedders.embedders import WordEmbedder\nfrom texar.torch.utils.utils import map_structure\n\n\nclass BasicRNNDecoderTest(unittest.TestCase):\n r\"\"\"Tests :class:`~texar.torch.modules.decoders.rnn_decoders.BasicRNNDecoder`.\n \"\"\"\n\n def setUp(self):\n self._vocab_size = 4\n self._max_time = 8\n self._batch_size = 16\n self._emb_dim = 20\n self._inputs = torch.randint(\n self._vocab_size, size=(self._batch_size, self._max_time))\n embedding = torch.rand(\n self._vocab_size, self._emb_dim, dtype=torch.float)\n self._embedder = WordEmbedder(init_value=embedding)\n self._hparams = HParams(None, BasicRNNDecoder.default_hparams())\n\n def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,\n test_mode=False):\n hidden_size = decoder.hparams.rnn_cell.kwargs.num_units\n\n self.assertIsInstance(outputs, BasicRNNDecoderOutput)\n max_time = (self._max_time if not test_mode\n else max(sequence_lengths).item())\n self.assertEqual(\n outputs.logits.shape,\n (self._batch_size, max_time, self._vocab_size))\n if not test_mode:\n np.testing.assert_array_equal(\n sequence_lengths, [max_time] * self._batch_size)\n self.assertEqual(final_state[0].shape, (self._batch_size, hidden_size))\n\n def test_decode_train(self):\n r\"\"\"Tests decoding in training mode.\n \"\"\"\n decoder = BasicRNNDecoder(\n token_embedder=self._embedder, input_size=self._emb_dim,\n vocab_size=self._vocab_size, hparams=self._hparams)\n sequence_length = torch.tensor([self._max_time] * self._batch_size)\n\n # Helper by default HParams\n helper_train = decoder.create_helper()\n outputs, final_state, sequence_lengths = decoder(\n helper=helper_train, inputs=self._inputs,\n sequence_length=sequence_length)\n self._test_outputs(decoder, outputs, final_state, sequence_lengths)\n\n # Helper by decoding strategy\n helper_train = decoder.create_helper(decoding_strategy='train_greedy')\n outputs, final_state, sequence_lengths = decoder(\n helper=helper_train, inputs=self._inputs,\n sequence_length=sequence_length)\n self._test_outputs(decoder, outputs, final_state, sequence_lengths)\n\n # Implicit helper\n outputs, final_state, sequence_lengths = decoder(\n inputs=self._inputs, sequence_length=sequence_length)\n self._test_outputs(decoder, outputs, final_state, sequence_lengths)\n\n # Eval helper through forward args\n outputs, final_state, sequence_lengths = decoder(\n embedding=self._embedder,\n start_tokens=torch.tensor([1] * self._batch_size),\n end_token=2, infer_mode=True)\n self._test_outputs(\n decoder, outputs, final_state, sequence_lengths, test_mode=True)\n\n @staticmethod\n def _assert_tensor_equal(a: torch.Tensor, b: torch.Tensor) -> bool:\n if torch.is_tensor(a):\n a = a.detach().numpy()\n if torch.is_tensor(b):\n b = b.detach().numpy()\n if any(np.issubdtype(array.dtype, np.floating) for array in [a, b]):\n return np.testing.assert_allclose(a, b, rtol=1e-5, atol=1e-8)\n return np.testing.assert_array_equal(a, b)\n\n def test_decode_train_with_torch(self):\n r\"\"\"Compares decoding results with PyTorch built-in decoder.\n \"\"\"\n decoder = BasicRNNDecoder(\n 
token_embedder=self._embedder, input_size=self._emb_dim,\n vocab_size=self._vocab_size, hparams=self._hparams)\n\n input_size = self._emb_dim\n hidden_size = decoder.hparams.rnn_cell.kwargs.num_units\n num_layers = decoder.hparams.rnn_cell.num_layers\n torch_lstm = nn.LSTM(input_size, hidden_size, num_layers,\n batch_first=True)\n\n # match parameters\n for name in ['weight_ih', 'weight_hh', 'bias_ih', 'bias_hh']:\n setattr(torch_lstm, f'{name}_l0',\n getattr(decoder._cell._cell, name))\n torch_lstm.flatten_parameters()\n\n output_layer = decoder._output_layer\n input_lengths = torch.tensor([self._max_time] * self._batch_size)\n inputs = torch.randint(\n self._vocab_size, size=(self._batch_size, self._max_time))\n\n # decoder outputs\n helper_train = decoder.create_helper()\n outputs, final_state, sequence_lengths = decoder(\n inputs=inputs,\n sequence_length=input_lengths,\n helper=helper_train)\n\n # torch LSTM outputs\n lstm_inputs = F.embedding(inputs, self._embedder.embedding)\n torch_outputs, torch_states = torch_lstm(lstm_inputs)\n torch_outputs = output_layer(torch_outputs)\n torch_sample_id = torch.argmax(torch_outputs, dim=-1)\n\n self.assertEqual(final_state[0].shape,\n (self._batch_size, hidden_size))\n\n self._assert_tensor_equal(outputs.logits, torch_outputs)\n self._assert_tensor_equal(outputs.sample_id, torch_sample_id)\n self._assert_tensor_equal(final_state[0], torch_states[0].squeeze(0))\n self._assert_tensor_equal(final_state[1], torch_states[1].squeeze(0))\n self._assert_tensor_equal(sequence_lengths, input_lengths)\n\n def test_decode_infer(self):\n r\"\"\"Tests decoding in inference mode.\"\"\"\n decoder = BasicRNNDecoder(\n token_embedder=self._embedder, input_size=self._emb_dim,\n vocab_size=self._vocab_size, hparams=self._hparams)\n\n decoder.eval()\n start_tokens = torch.tensor([self._vocab_size - 2] * self._batch_size)\n\n helpers = []\n for strategy in ['infer_greedy', 'infer_sample']:\n helper = decoder.create_helper(\n decoding_strategy=strategy,\n start_tokens=start_tokens,\n end_token=self._vocab_size - 1)\n helpers.append(helper)\n for klass in ['TopKSampleEmbeddingHelper', 'SoftmaxEmbeddingHelper',\n 'GumbelSoftmaxEmbeddingHelper']:\n helper = get_helper(\n klass, start_tokens=start_tokens,\n end_token=self._vocab_size - 1,\n top_k=self._vocab_size // 2, tau=2.0,\n straight_through=True)\n helpers.append(helper)\n\n for helper in helpers:\n max_length = 100\n outputs, final_state, sequence_lengths = decoder(\n helper=helper, max_decoding_length=max_length)\n self.assertLessEqual(max(sequence_lengths), max_length)\n self._test_outputs(decoder, outputs, final_state, sequence_lengths,\n test_mode=True)\n\n\nclass AttentionRNNDecoderTest(unittest.TestCase):\n r\"\"\"Tests :class:`~texar.torch.modules.decoders.rnn_decoders.AttentionRNNDecoder`.\n \"\"\"\n\n def setUp(self):\n self._vocab_size = 10\n self._max_time = 16\n self._batch_size = 8\n self._emb_dim = 20\n self._attention_dim = 256\n self._inputs = torch.randint(\n self._vocab_size, size=(self._batch_size, self._max_time))\n embedding = torch.rand(\n self._vocab_size, self._emb_dim, dtype=torch.float)\n self._embedder = WordEmbedder(init_value=embedding)\n self._encoder_output = torch.rand(\n self._batch_size, self._max_time, 64)\n\n self._test_hparams = {} # (cell_type, is_multi) -> hparams\n for cell_type in [\"RNNCell\", \"LSTMCell\", \"GRUCell\"]:\n hparams = {\n \"rnn_cell\": {\n 'type': cell_type,\n 'kwargs': {\n 'num_units': 256,\n },\n },\n \"attention\": {\n \"kwargs\": {\n \"num_units\": 
self._attention_dim\n },\n }\n }\n self._test_hparams[(cell_type, False)] = HParams(\n hparams, AttentionRNNDecoder.default_hparams())\n\n hparams = {\n \"rnn_cell\": {\n 'type': 'LSTMCell',\n 'kwargs': {\n 'num_units': 256,\n },\n 'num_layers': 3,\n },\n \"attention\": {\n \"kwargs\": {\n \"num_units\": self._attention_dim\n },\n }\n }\n self._test_hparams[(\"LSTMCell\", True)] = HParams(\n hparams, AttentionRNNDecoder.default_hparams())\n\n def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,\n test_mode=False):\n hidden_size = decoder.hparams.rnn_cell.kwargs.num_units\n cell_type = decoder.hparams.rnn_cell.type\n is_multi = decoder.hparams.rnn_cell.num_layers > 1\n\n self.assertIsInstance(outputs, AttentionRNNDecoderOutput)\n max_time = (self._max_time if not test_mode\n else max(sequence_lengths).item())\n self.assertEqual(\n outputs.logits.shape,\n (self._batch_size, max_time, self._vocab_size))\n if not test_mode:\n np.testing.assert_array_equal(\n sequence_lengths, [max_time] * self._batch_size)\n\n map_structure(\n lambda t: self.assertEqual(\n t.size(), (self._batch_size, hidden_size)),\n final_state.cell_state)\n state = final_state.cell_state\n if is_multi:\n self.assertIsInstance(state, list)\n state = state[0]\n if cell_type == \"LSTMCell\":\n self.assertIsInstance(state, tuple)\n state = state[0]\n self.assertIsInstance(state, torch.Tensor)\n\n def test_decode_infer(self):\n r\"\"\"Tests decoding in inference mode.\n \"\"\"\n seq_length = np.random.randint(\n self._max_time, size=[self._batch_size]) + 1\n encoder_values_length = torch.tensor(seq_length)\n\n for (cell_type, is_multi), hparams in self._test_hparams.items():\n decoder = AttentionRNNDecoder(\n encoder_output_size=64,\n token_embedder=self._embedder,\n vocab_size=self._vocab_size,\n input_size=self._emb_dim,\n hparams=hparams)\n\n decoder.eval()\n\n helper_infer = decoder.create_helper(\n start_tokens=torch.tensor([1] * self._batch_size), end_token=2)\n\n outputs, final_state, sequence_lengths = decoder(\n memory=self._encoder_output,\n memory_sequence_length=encoder_values_length,\n helper=helper_infer)\n\n self._test_outputs(decoder, outputs, final_state, sequence_lengths,\n test_mode=True)\n\n def test_decode_train(self):\n r\"\"\"Tests decoding in training mode.\n \"\"\"\n seq_length = np.random.randint(\n self._max_time, size=[self._batch_size]) + 1\n encoder_values_length = torch.tensor(seq_length)\n\n for (cell_type, is_multi), hparams in self._test_hparams.items():\n decoder = AttentionRNNDecoder(\n encoder_output_size=64,\n token_embedder=self._embedder,\n vocab_size=self._vocab_size,\n input_size=self._emb_dim,\n hparams=hparams)\n\n sequence_length = torch.tensor([self._max_time] * self._batch_size)\n\n helper_train = decoder.create_helper()\n outputs, final_state, sequence_lengths = decoder(\n memory=self._encoder_output,\n memory_sequence_length=encoder_values_length,\n helper=helper_train,\n inputs=self._inputs,\n sequence_length=sequence_length)\n\n self._test_outputs(decoder, outputs, final_state, sequence_lengths)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2019 The Texar Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example of fine-tuning OpenAI GPT-2 language model.\n\"\"\"\n\nimport os\nimport argparse\nimport importlib\n\nimport torch\nimport texar.torch as tx\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--checkpoint', type=str, default=None,\n help=\"Model checkpoint to load model weights from.\")\nparser.add_argument(\n \"--pretrained-model-name\", type=str, default=\"gpt2-small\",\n choices=tx.modules.GPT2Decoder.available_checkpoints(),\n help=\"Name of the pre-trained checkpoint to load.\")\nparser.add_argument(\n '--config-train', type=str, default=\"config_train\",\n help=\"Configurations of GPT-2 training, including data and \"\n \"optimization hyperparameters.\")\nparser.add_argument(\n \"--output-dir\", default=\"output/\",\n help=\"The output directory where the model checkpoints will be written.\")\nparser.add_argument(\n '--temperature', type=float, default=0.7,\n help=\"Softmax temperature for top-k sample decoding. Must be strictly \"\n \"greater than 0. Defaults to 0.7.\")\nparser.add_argument(\n '--top-k', type=int, default=40,\n help=\"The number of top most likely candidates from a vocab distribution.\")\nparser.add_argument(\n '--top-p', type=float, default=None,\n help=\"Select tokens with cumulative probability of at most 'p' when \"\n \"arranged in decreasing order. 
This will use \"\n \"TopPSampleEmbeddingHelper for decoding.\")\nparser.add_argument(\n \"--do-train\", action=\"store_true\", help=\"Whether to run training.\")\nparser.add_argument(\n \"--do-eval\", action=\"store_true\",\n help=\"Whether to run eval on the dev set.\")\nparser.add_argument(\n \"--do-test\", action=\"store_true\",\n help=\"Whether to run test on the test set.\")\n\nargs = parser.parse_args()\n\nconfig_train = importlib.import_module(args.config_train)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef main():\n \"\"\"\n Builds the model and runs.\n \"\"\"\n tx.utils.maybe_create_dir(args.output_dir)\n\n max_decoding_length = config_train.max_decoding_length\n\n # Build the GPT-2 model\n model = tx.modules.GPT2Decoder(args.pretrained_model_name)\n if args.checkpoint:\n ckpt = torch.load(args.checkpoint)\n model.load_state_dict(ckpt['model'])\n model.to(device)\n\n if max_decoding_length > model.hparams.position_size:\n raise ValueError(\n \"max_decoding_length should not be greater than position size\")\n\n # Create a GPT-2 tokenizer (BPE encoding)\n tokenizer = tx.data.GPT2Tokenizer(\n pretrained_model_name=args.pretrained_model_name)\n\n # Loads data\n datasets = {}\n if args.do_train:\n train_dataset = tx.data.RecordData(\n hparams=config_train.train_hparam, device=device)\n datasets['train'] = train_dataset\n if args.do_eval:\n eval_dataset = tx.data.RecordData(\n hparams=config_train.eval_hparam, device=device)\n datasets['eval'] = eval_dataset\n if args.do_test:\n test_dataset = tx.data.RecordData(\n hparams=config_train.test_hparam, device=device)\n datasets['test'] = test_dataset\n iterator = tx.data.DataIterator(datasets)\n\n # For training\n train_op = tx.core.get_train_op(\n params=model.parameters(), hparams=config_train.opt)\n\n end_token = tokenizer.map_token_to_id('<|endoftext|>')\n\n def _get_helper(start_tokens):\n if args.top_p:\n helper = tx.modules.TopPSampleEmbeddingHelper(\n start_tokens=start_tokens,\n end_token=end_token,\n p=args.top_p,\n softmax_temperature=args.temperature)\n else:\n helper = tx.modules.TopKSampleEmbeddingHelper(\n start_tokens=start_tokens,\n end_token=end_token,\n top_k=args.top_k,\n softmax_temperature=args.temperature)\n return helper\n\n dis_steps = config_train.display_steps\n eval_steps = config_train.eval_steps\n\n eval_best = {\"loss\": 1e8, \"ppl\": 1e8}\n\n def _train_epoch():\n r\"\"\"Trains on the training set, and evaluates on the dev set\n periodically.\n \"\"\"\n iterator.switch_to_dataset(\"train\")\n model.train()\n\n step = 0\n for batch in iterator:\n input_ids = batch[\"text_ids\"]\n\n outputs = model(inputs=input_ids, decoding_strategy='train_greedy')\n\n loss = tx.losses.sequence_sparse_softmax_cross_entropy(\n labels=batch['text_ids'][:, 1:],\n logits=outputs.logits[:, :-1, :],\n sequence_length=batch['length'] - 1,\n average_across_timesteps=True,\n sum_over_timesteps=False)\n loss.backward()\n train_op()\n\n if dis_steps > 0 and step % dis_steps == 0:\n print(\"step={}, loss={:.4f}\".format(step, loss))\n\n if eval_steps > 0 and step % eval_steps == 0:\n _eval_epoch()\n\n step += 1\n\n @torch.no_grad()\n def _eval_epoch():\n r\"\"\"Evaluates on the dev set.\n \"\"\"\n iterator.switch_to_dataset(\"eval\")\n model.eval()\n\n nsamples = 0\n avg_rec = tx.utils.AverageRecorder()\n for batch in iterator:\n input_ids = batch[\"text_ids\"]\n\n outputs = model(inputs=input_ids)\n\n loss = tx.losses.sequence_sparse_softmax_cross_entropy(\n labels=batch['text_ids'][:, 1:],\n 
logits=outputs.logits[:, :-1, :],\n sequence_length=batch['length'] - 1,\n average_across_timesteps=True,\n sum_over_timesteps=False)\n ppl = torch.exp(loss)\n batch_size = input_ids.size()[0]\n avg_rec.add([loss, ppl], batch_size)\n nsamples += batch_size\n\n print(\"eval loss: {:.4f}; ppl: {:.4f}; \"\n \"nsamples: {:d}\".format(avg_rec.avg(0), avg_rec.avg(1), nsamples))\n\n if args.do_train and avg_rec.avg(0) < eval_best[\"loss\"]:\n eval_best[\"loss\"] = avg_rec.avg(0)\n eval_best[\"ppl\"] = avg_rec.avg(1)\n ckpt_fn = os.path.join(args.output_dir, 'model_best.ckpt')\n torch.save(model.state_dict(), ckpt_fn)\n print(\"Checkpoint best to {}\".format(ckpt_fn))\n\n @torch.no_grad()\n def _test_epoch():\n r\"\"\"Generates samples on the test set.\n \"\"\"\n iterator.switch_to_dataset(\"test\")\n model.eval()\n\n _all_inputs = []\n _all_samples = []\n\n for batch in iterator:\n input_ids = batch[\"text_ids\"]\n length = batch[\"length\"]\n start_tokens = input_ids[:, 0]\n helper = _get_helper(start_tokens)\n\n output, _ = model(\n context=input_ids,\n context_sequence_length=length,\n max_decoding_length=max_decoding_length,\n helper=helper)\n sample_id = output.sample_id\n\n _inputs = []\n for i, l in zip(input_ids, length):\n # Delete padding\n _inputs.append(i[:l].tolist())\n _all_inputs.extend(_inputs)\n\n _samples = []\n for s, l in zip(sample_id, length):\n # Delte inputs from samples\n _samples.append(s[l:].tolist())\n _all_samples.extend(_samples)\n\n # Parse samples and write to file\n\n eos_token_id = tokenizer.map_token_to_id('<|endoftext|>')\n\n _all_input_text = []\n for i in _all_inputs:\n if i[0] == eos_token_id:\n # '<|endoftext|>' is used as the BOS token. Delete it here\n i = i[1:]\n i_text = tokenizer.map_id_to_text(i)\n _all_input_text.append(i_text)\n # '<|endoftext|>' is used as the PAD token. 
Delete them here\n _all_input_text = tx.utils.strip_eos(_all_input_text,\n eos_token='<|endoftext|>')\n\n _all_samples_text = []\n for i, s in zip(_all_inputs, _all_samples):\n s_text = tokenizer.map_id_to_text(s)\n s_text = s_text.replace('\\n', ' ')\n _all_samples_text.append(s_text)\n _all_samples_text = tx.utils.strip_eos(_all_samples_text,\n eos_token='<|endoftext|>')\n\n output_file = os.path.join(args.output_dir, \"test_samples.tsv\")\n print('Write samples to {}'.format(output_file))\n tx.utils.write_paired_text(\n _all_input_text, _all_samples_text, output_file)\n\n if args.do_train:\n for _ in range(config_train.max_train_epoch):\n _train_epoch()\n torch.save(model.state_dict(),\n os.path.join(args.output_dir, 'model.ckpt'))\n\n if args.do_eval:\n _eval_epoch()\n\n if args.do_test:\n _test_epoch()\n\n\nif __name__ == \"__main__\":\n main()\n", "\"\"\"\nUnit tests for BERT classifiers.\n\"\"\"\n\nimport unittest\n\nimport torch\n\nfrom texar.torch.modules.classifiers.bert_classifier import *\nfrom texar.torch.utils.test import pretrained_test\n\n\nclass BERTClassifierTest(unittest.TestCase):\n r\"\"\"Tests :class:`~texar.torch.modules.BERTClassifier` class.\n \"\"\"\n\n def setUp(self) -> None:\n self.batch_size = 2\n self.max_length = 3\n self.inputs = torch.zeros(\n self.batch_size, self.max_length, dtype=torch.long)\n\n @pretrained_test\n def test_model_loading(self):\n r\"\"\"Tests model loading functionality.\"\"\"\n for pretrained_model_name in BERTClassifier.available_checkpoints():\n classifier = BERTClassifier(\n pretrained_model_name=pretrained_model_name)\n _, _ = classifier(self.inputs)\n\n def test_trainable_variables(self):\n r\"\"\"Tests the functionality of automatically collecting trainable\n variables.\n \"\"\"\n # case 1\n hparams = {\n \"pretrained_model_name\": None,\n }\n classifier = BERTClassifier(hparams=hparams)\n self.assertEqual(len(classifier.trainable_variables), 199 + 2)\n _, _ = classifier(self.inputs)\n\n # case 2\n hparams = {\n \"pretrained_model_name\": None,\n \"clas_strategy\": \"all_time\",\n \"max_seq_length\": 8,\n }\n classifier = BERTClassifier(hparams=hparams)\n self.assertEqual(len(classifier.trainable_variables), 199 + 2)\n _, _ = classifier(self.inputs)\n\n # case 3\n hparams = {\n \"pretrained_model_name\": None,\n \"clas_strategy\": \"time_wise\",\n }\n classifier = BERTClassifier(hparams=hparams)\n self.assertEqual(len(classifier.trainable_variables), 199 + 2)\n _, _ = classifier(self.inputs)\n\n def test_classification(self):\n r\"\"\"Tests classification.\n \"\"\"\n inputs = torch.randint(30521, (self.batch_size, self.max_length))\n\n # case 1\n hparams = {\n \"pretrained_model_name\": None,\n }\n classifier = BERTClassifier(hparams=hparams)\n logits, preds = classifier(inputs)\n\n self.assertEqual(logits.shape, torch.Size(\n [self.batch_size, classifier.output_size]))\n self.assertEqual(preds.shape, torch.Size([self.batch_size]))\n\n # case 2\n hparams = {\n \"pretrained_model_name\": None,\n \"num_classes\": 10,\n \"clas_strategy\": \"time_wise\",\n }\n classifier = BERTClassifier(hparams=hparams)\n logits, preds = classifier(inputs)\n\n self.assertEqual(logits.shape, torch.Size(\n [self.batch_size, self.max_length, classifier.output_size]))\n self.assertEqual(preds.shape, torch.Size(\n [self.batch_size, self.max_length]))\n\n # case 3\n hparams = {\n \"pretrained_model_name\": None,\n \"num_classes\": 0,\n \"clas_strategy\": \"time_wise\",\n }\n classifier = BERTClassifier(hparams=hparams)\n logits, preds = 
classifier(inputs)\n\n self.assertEqual(logits.shape, torch.Size(\n [self.batch_size, self.max_length, classifier.output_size]))\n self.assertEqual(preds.shape, torch.Size(\n [self.batch_size, self.max_length]))\n\n # case 4\n hparams = {\n \"pretrained_model_name\": None,\n \"num_classes\": 10,\n \"clas_strategy\": \"all_time\",\n \"max_seq_length\": self.max_length,\n }\n classifier = BERTClassifier(hparams=hparams)\n logits, preds = classifier(inputs)\n\n self.assertEqual(logits.shape, torch.Size(\n [self.batch_size, classifier.output_size]))\n self.assertEqual(preds.shape, torch.Size([self.batch_size]))\n\n def test_binary(self):\n r\"\"\"Tests binary classification.\n \"\"\"\n inputs = torch.randint(30521, (self.batch_size, self.max_length))\n\n # case 1\n hparams = {\n \"pretrained_model_name\": None,\n \"num_classes\": 1,\n \"clas_strategy\": \"time_wise\",\n }\n classifier = BERTClassifier(hparams=hparams)\n logits, preds = classifier(inputs)\n\n self.assertEqual(logits.shape, torch.Size(\n [self.batch_size, self.max_length]))\n self.assertEqual(preds.shape, torch.Size(\n [self.batch_size, self.max_length]))\n\n # case 2\n hparams = {\n \"pretrained_model_name\": None,\n \"num_classes\": 1,\n \"clas_strategy\": \"cls_time\",\n \"max_seq_length\": self.max_length,\n }\n classifier = BERTClassifier(hparams=hparams)\n logits, preds = classifier(inputs)\n\n self.assertEqual(logits.shape, torch.Size([self.batch_size]))\n self.assertEqual(preds.shape, torch.Size([self.batch_size]))\n\n # case 3\n hparams = {\n \"pretrained_model_name\": None,\n \"num_classes\": 1,\n \"clas_strategy\": \"all_time\",\n \"max_seq_length\": self.max_length,\n }\n classifier = BERTClassifier(hparams=hparams)\n logits, preds = classifier(inputs)\n\n self.assertEqual(logits.shape, torch.Size([self.batch_size]))\n self.assertEqual(preds.shape, torch.Size([self.batch_size]))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2019 The Texar Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTransformer decoder.\n\"\"\"\nimport warnings\nfrom typing import Callable, Dict, NamedTuple, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\n\nfrom texar.torch.core import layers\nfrom texar.torch.modules.decoders.decoder_base import (\n DecoderBase, TokenEmbedder, TokenPosEmbedder, _make_output_layer)\nfrom texar.torch.modules.decoders.decoder_helpers import (\n EmbeddingHelper, Helper)\nfrom texar.torch.modules.encoders.multihead_attention import (\n Cache, MultiheadAttentionEncoder)\nfrom texar.torch.modules.encoders.transformer_encoder import (\n default_transformer_poswise_net_hparams)\nfrom texar.torch.modules.networks.networks import FeedForwardNetwork\nfrom texar.torch.utils import transformer_attentions as attn\nfrom texar.torch.utils.beam_search import beam_search\nfrom texar.torch.utils.shapes import mask_sequences\nfrom texar.torch.utils.utils import sequence_mask\n\n__all__ = [\n 'TransformerDecoderOutput',\n 'TransformerDecoder',\n]\n\nEmbeddingFn = Callable[[torch.LongTensor, torch.LongTensor], torch.Tensor]\n\n\nclass TransformerDecoderOutput(NamedTuple):\n r\"\"\"The output of :class:`TransformerDecoder`.\n \"\"\"\n logits: torch.Tensor\n r\"\"\"A :tensor:`Tensor` of shape ``[batch_size, max_time, vocab_size]``\n containing the logits.\"\"\"\n sample_id: torch.LongTensor\n r\"\"\"A :tensor:`LongTensor` of shape ``[batch_size, max_time]``\n (or ``[batch_size, max_time, vocab_size]``) containing the sampled\n token indices. Note that the shape of ``sample_id`` is different for\n different decoding strategy or helper. Please refer to\n :class:`~texar.torch.modules.Helper` for the detailed information.\"\"\"\n\n\nclass TransformerDecoder(DecoderBase[Cache, TransformerDecoderOutput]):\n r\"\"\"Transformer decoder that applies multi-head self-attention for\n sequence decoding.\n\n It is a stack of\n :class:`~texar.torch.modules.MultiheadAttentionEncoder`,\n :class:`~texar.torch.modules.FeedForwardNetwork`, and residual connections.\n\n Args:\n token_embedder: An instance of :torch_nn:`Module`, or a function taking\n a :tensor:`LongTensor` ``tokens`` as argument. This is the embedder\n called in :meth:`embed_tokens` to convert input tokens to\n embeddings.\n token_pos_embedder: An instance of :torch_nn:`Module`, or a function\n taking two :tensor:`LongTensor`\\ s ``tokens`` and ``positions`` as\n argument. This is the embedder called in :meth:`embed_tokens` to\n convert input tokens with positions to embeddings.\n\n .. note::\n Only one among :attr:`token_embedder` and\n :attr:`token_pos_embedder` should be specified. If neither is\n specified, you must subclass :class:`TransformerDecoder` and\n override :meth:`embed_tokens`.\n vocab_size (int, optional): Vocabulary size. Required if\n :attr:`output_layer` is `None`.\n output_layer (optional): An output layer that transforms cell output\n to logits. 
This can be:\n\n - A callable layer, e.g., an instance of :torch_nn:`Module`.\n - A tensor. A :torch_nn:`Linear` layer will be created using the\n tensor as weights. The bias of the dense layer is determined\n by ``hparams.output_layer_bias``. This can be used to tie the\n output layer with the input embedding matrix, as proposed in\n https://arxiv.org/pdf/1608.05859.pdf.\n - `None`. A :torch_nn:`Linear` layer will be created based on\n :attr:`vocab_size` and ``hparams.output_layer_bias``.\n - If no output layer is needed at the end, set\n :attr:`vocab_size` to `None` and ``output_layer`` to\n :func:`~texar.torch.core.identity`.\n hparams (dict or HParams, optional): Hyperparameters. Missing\n hyperparameters will be set to default values. See\n :meth:`default_hparams` for the hyperparameter structure and\n default values.\n\n .. document private functions\n \"\"\"\n\n # State variables used during `dynamic_decode`. Assigned in `forward`.\n _state_max_decoding_length: int\n _state_context: Optional[torch.LongTensor]\n _state_context_sequence_length: Optional[torch.LongTensor]\n _state_cache: Cache\n\n def __init__(self,\n token_embedder: Optional[TokenEmbedder] = None,\n token_pos_embedder: Optional[TokenPosEmbedder] = None,\n vocab_size: Optional[int] = None,\n output_layer: Optional[Union[nn.Module, torch.Tensor]] = None,\n hparams=None):\n super().__init__(\n token_embedder, token_pos_embedder,\n input_time_major=False, output_time_major=False, hparams=hparams)\n\n if token_pos_embedder is None and token_embedder is not None:\n warnings.warn(\n \"Transformer models cannot capture positional information if \"\n \"no positional embedding is provided.\")\n\n self._input_size = self._hparams.dim\n self._output_layer, self._vocab_size = _make_output_layer(\n output_layer, vocab_size, self._input_size,\n self._hparams.output_layer_bias)\n\n self.self_attns = nn.ModuleList()\n self.self_attn_layer_norm = nn.ModuleList()\n self.enc_dec_attns = nn.ModuleList()\n self.end_dec_attn_layer_norm = nn.ModuleList()\n self.poswise_networks = nn.ModuleList()\n self.poswise_layer_norm = nn.ModuleList()\n\n if self._hparams.use_gpt_config:\n eps = 1e-5\n else:\n eps = 1e-12\n\n for _ in range(self._hparams.num_blocks):\n attn_module = MultiheadAttentionEncoder(\n self._input_size, self._hparams.multihead_attention)\n if self._hparams.dim != attn_module.output_size:\n raise ValueError(\"The output dimension of \"\n \"MultiheadEncoder should be equal \"\n \"to the dim of TransformerDecoder\")\n self.self_attns.append(attn_module)\n self.self_attn_layer_norm.append(\n nn.LayerNorm(self._input_size, eps=eps))\n\n attn_module = MultiheadAttentionEncoder(\n self._input_size, self._hparams.multihead_attention)\n if self._hparams.dim != attn_module.output_size:\n raise ValueError(\"The output dimension of \"\n \"MultiheadEncoder should be equal \"\n \"to the dim of TransformerDecoder\")\n self.enc_dec_attns.append(attn_module)\n self.end_dec_attn_layer_norm.append(\n nn.LayerNorm(self._input_size, eps=eps))\n\n poswise_network = FeedForwardNetwork(\n hparams=self._hparams.poswise_feedforward)\n if (poswise_network.hparams.layers[-1]['kwargs']['out_features']\n != self._hparams.dim):\n raise ValueError(\"The output dimension of \"\n \"FeedForwardNetwork should be equal \"\n \"to the dim of TransformerDecoder\")\n self.poswise_networks.append(poswise_network)\n self.poswise_layer_norm.append(\n nn.LayerNorm(self._input_size, eps=eps))\n\n self.final_layer_norm = nn.LayerNorm(self._input_size, eps=eps)\n 
self.embed_dropout = nn.Dropout(self._hparams.embedding_dropout)\n self.residual_dropout = nn.Dropout(self._hparams.residual_dropout)\n\n if self._hparams.initializer:\n # TODO: This might be different to what TensorFlow does\n initialize = layers.get_initializer(self._hparams.initializer)\n assert initialize is not None\n # Do not re-initialize LayerNorm modules.\n for name, param in self.named_parameters():\n if name.split(\".\")[-1] == \"weight\" and \"layer_norm\" not in name:\n initialize(param)\n\n @staticmethod\n def default_hparams():\n r\"\"\"Returns a dictionary of hyperparameters with default values.\n\n .. code-block:: python\n\n {\n # Same as in TransformerEncoder\n \"num_blocks\": 6,\n \"dim\": 512,\n \"use_gpt_config\": False,\n \"embedding_dropout\": 0.1,\n \"residual_dropout\": 0.1,\n \"poswise_feedforward\": default_transformer_poswise_net_hparams,\n \"multihead_attention\": {\n 'name': 'multihead_attention',\n 'num_units': 512,\n 'output_dim': 512,\n 'num_heads': 8,\n 'dropout_rate': 0.1,\n 'use_bias': False,\n },\n \"initializer\": None,\n \"name\": \"transformer_decoder\"\n\n # Additional for TransformerDecoder\n \"embedding_tie\": True,\n \"output_layer_bias\": False,\n \"max_decoding_length\": int(1e10),\n }\n\n Here:\n\n `\"num_blocks\"`: int\n Number of stacked blocks.\n\n `\"dim\"`: int\n Hidden dimension of the encoder.\n\n `\"use_gpt_config\"`: bool\n Whether to follow the `eps` setting of OpenAI GPT.\n\n `\"embedding_dropout\"`: float\n Dropout rate of the input word and position embeddings.\n\n `\"residual_dropout\"`: float\n Dropout rate of the residual connections.\n\n `\"poswise_feedforward\"`: dict\n Hyperparameters for a feed-forward network used in residual\n connections.\n Make sure the dimension of the output tensor is equal to ``dim``.\n\n See\n :func:`~texar.torch.modules.default_transformer_poswise_net_hparams`\n for details.\n\n `\"multihead_attention\"`: dict\n Hyperparameters for the multi-head attention strategy.\n Make sure the ``output_dim`` in this module is equal to ``dim``.\n\n See :class:`~texar.torch.modules.MultiheadAttentionEncoder`\n for details.\n\n `\"initializer\"`: dict, optional\n Hyperparameters of the default initializer that initializes\n variables created in this module.\n\n See :func:`~texar.torch.core.get_initializer` for details.\n\n `\"embedding_tie\"`: bool\n Whether to use the word embedding matrix as the output layer\n that computes logits. 
If `False`, a new dense layer is created.\n\n `\"output_layer_bias\"`: bool\n Whether to use bias to the output layer.\n\n `\"max_decoding_length\"`: int\n The maximum allowed number of decoding steps.\n Set to a very large number of avoid the length constraint.\n Ignored if provided in :meth:`forward` or ``\"train_greedy\"``\n decoding is used.\n\n `\"name\"`: str\n Name of the module.\n \"\"\"\n dim = 512\n return {\n 'num_blocks': 6,\n 'dim': dim,\n 'use_gpt_config': False,\n 'embedding_tie': True,\n 'output_layer_bias': False,\n 'max_decoding_length': int(1e10),\n 'embedding_dropout': 0.1,\n 'residual_dropout': 0.1,\n 'poswise_feedforward': default_transformer_poswise_net_hparams(dim),\n 'multihead_attention': {\n 'name': 'multihead_attention',\n 'num_units': 512,\n 'num_heads': 8,\n 'dropout_rate': 0.1,\n 'output_dim': 512,\n 'use_bias': False,\n },\n 'initializer': None,\n 'name': \"transformer_decoder\",\n }\n\n def _inputs_to_outputs(self, inputs: torch.Tensor,\n cache: Cache) -> Tuple[torch.Tensor, Cache]:\n r\"\"\"Returns the outputs of one decoding step (for example,\n the predicted logits of the next token).\n\n :attr:`inputs` should be of shape ``[batch_size, dim]``.\n\n Returns:\n A tuple of logits and updated cache. Logits are of shape\n ``[batch_size, vocab_size]``.\n \"\"\"\n outputs = self._self_attention_stack(\n inputs.unsqueeze(1), memory=cache['memory'], cache=cache)\n outputs = self._output_layer(outputs)\n outputs = outputs.squeeze(1)\n return outputs, cache\n\n def forward(self, # type: ignore\n inputs: Optional[torch.Tensor] = None,\n sequence_length: Optional[torch.LongTensor] = None,\n memory: Optional[torch.Tensor] = None,\n memory_sequence_length: Optional[torch.LongTensor] = None,\n memory_attention_bias: Optional[torch.Tensor] = None,\n context: Optional[torch.Tensor] = None,\n context_sequence_length: Optional[torch.LongTensor] = None,\n helper: Optional[Helper] = None,\n decoding_strategy: str = 'train_greedy',\n max_decoding_length: Optional[int] = None,\n impute_finished: bool = False,\n infer_mode: Optional[bool] = None,\n beam_width: Optional[int] = None,\n length_penalty: float = 0.,\n **kwargs) \\\n -> Union[\n TransformerDecoderOutput,\n Tuple[TransformerDecoderOutput, torch.LongTensor],\n Dict[str, torch.Tensor]]:\n r\"\"\"Performs decoding.\n\n The interface is very similar to that of RNN decoders\n (:class:`~texar.torch.modules.RNNDecoderBase`). In particular,\n the function provides **3 ways** to specify the decoding method, with\n varying flexibility:\n\n 1. 
The :attr:`decoding_strategy` argument.\n\n - **\"train_greedy\"**: decoding in teacher-forcing fashion (i.e.,\n feeding ground truth to decode the next step), and for each step\n sample is obtained by taking the `argmax` of logits.\n Argument :attr:`inputs` is required for this strategy.\n :attr:`sequence_length` is optional.\n - **\"infer_greedy\"**: decoding in inference fashion (i.e., feeding\n `generated` sample to decode the next step), and for each step\n sample is obtained by taking the `argmax` of logits.\n Arguments :attr:`(start_tokens, end_token)` are\n required for this strategy, and argument\n :attr:`max_decoding_length` is optional.\n - **\"infer_sample\"**: decoding in inference fashion, and for each\n step sample is obtained by `random sampling` from the logits.\n Arguments :attr:`(start_tokens, end_token)` are required for this\n strategy, and argument :attr:`max_decoding_length` is optional.\n\n This argument is used only when arguments :attr:`helper` and\n :attr:`beam_width` are both `None`.\n\n 2. The :attr:`helper` argument: An instance of subclass of\n :class:`~texar.torch.modules.Helper`.\n This provides a superset of decoding strategies than above.\n The interface is the same as in RNN decoders.\n Please refer to :meth:`texar.torch.modules.RNNDecoderBase.forward`\n for detailed usage and examples.\n\n Note that, here, though using a\n :class:`~texar.torch.modules.TrainingHelper` corresponding to the\n ``\"train_greedy\"`` strategy above, the implementation is *slower*\n than directly setting ``decoding_strategy=\"train_greedy\"`` (though\n output results are the same).\n\n Argument :attr:`max_decoding_length` is optional.\n\n 3. **Beam search**: set :attr:`beam_width` to use beam search decoding.\n Arguments :attr:`(start_tokens, end_token)` are required,\n and argument :attr:`max_decoding_length` is optional.\n\n Args:\n memory (optional): The memory to attend, e.g., the output of an RNN\n encoder. A :tensor:`Tensor` of shape\n ``[batch_size, memory_max_time, dim]``.\n memory_sequence_length (optional): A :tensor:`Tensor` of shape\n ``[batch_size]`` containing the sequence lengths for the batch\n entries in memory. Used to create attention bias of\n :attr:`memory_attention_bias` is not given. Ignored if\n :attr:`memory_attention_bias` is provided.\n memory_attention_bias (optional): A :tensor:`Tensor` of shape\n ``[batch_size, num_heads, memory_max_time, dim]``.\n An attention bias typically sets the value of a padding\n position to a large negative value for masking. If not given,\n :attr:`memory_sequence_length` is used to automatically\n create an attention bias.\n inputs (optional): Input tensors for teacher forcing decoding.\n Used when :attr:`decoding_strategy` is set to\n ``\"train_greedy\"``, or when `hparams`-configured helper is used.\n\n The attr:`inputs` is a :tensor:`LongTensor` used as index to\n look up embeddings and feed in the decoder. For example, if\n :attr:`embedder` is an instance of\n :class:`~texar.torch.modules.WordEmbedder`, then :attr:`inputs`\n is usually a 2D int Tensor `[batch_size, max_time]` (or\n `[max_time, batch_size]` if `input_time_major` == `True`)\n containing the token indexes.\n sequence_length (optional): A :tensor:`LongTensor` of shape\n ``[batch_size]``, containing the sequence length of\n :attr:`inputs`. 
Tokens beyond the respective sequence length are\n masked out.\n Used when :attr:`decoding_strategy` is set to\n ``\"train_greedy\"``.\n decoding_strategy (str): A string specifying the decoding\n strategy, including ``\"train_greedy\"``, ``\"infer_greedy\"``,\n ``\"infer_sample\"``.\n Different arguments are required based on the\n strategy. See above for details. Ignored if\n :attr:`beam_width` or :attr:`helper` is set.\n beam_width (int): Set to use beam search. If given,\n :attr:`decoding_strategy` is ignored.\n length_penalty (float): Length penalty coefficient used in beam\n search decoding. Refer to https://arxiv.org/abs/1609.08144\n for more details.\n It should be larger if longer sentences are desired.\n context (optional): An :tensor:`LongTensor` of shape\n ``[batch_size, length]``, containing the starting tokens for\n decoding. If context is set, ``start_tokens`` of the\n :class:`~texar.torch.modules.Helper` will be ignored.\n context_sequence_length (optional): Specify the length of context.\n max_decoding_length (int, optional): The maximum allowed number of\n decoding steps.\n If `None` (default), use ``\"max_decoding_length\"`` defined in\n :attr:`hparams`. Ignored in ``\"train_greedy\"`` decoding.\n impute_finished (bool): If `True`, then states for batch\n entries which are marked as finished get copied through and\n the corresponding outputs get zeroed out. This causes some\n slowdown at each time step, but ensures that the final state\n and outputs have the correct values and that backprop ignores\n time steps that were marked as finished. Ignored in\n ``\"train_greedy\"`` decoding.\n helper (optional): An instance of\n :class:`~texar.torch.modules.Helper`\n that defines the decoding strategy. If given,\n ``decoding_strategy`` and helper configurations in\n :attr:`hparams` are ignored.\n infer_mode (optional): If not `None`, overrides mode given by\n :attr:`self.training`.\n\n Returns:\n\n - For **\"train_greedy\"** decoding, returns an instance of\n :class:`~texar.torch.modules.TransformerDecoderOutput` which\n contains `sample_id` and `logits`.\n\n - For **\"infer_greedy\"** and **\"infer_sample\"** decoding or\n decoding with :attr:`helper`, returns\n a tuple ``(outputs, sequence_lengths)``, where ``outputs`` is an\n instance of :class:`~texar.torch.modules.TransformerDecoderOutput`\n as in `\"train_greedy\"`, and ``sequence_lengths`` is a\n :tensor:`LongTensor` of shape ``[batch_size]`` containing the\n length of each sample.\n\n - For **beam search** decoding, returns a ``dict`` containing keys\n ``\"sample_id\"`` and ``\"log_prob\"``.\n\n - ``\"sample_id\"`` is a :tensor:`LongTensor` of shape\n ``[batch_size, max_time, beam_width]`` containing generated\n token indexes. 
``sample_id[:,:,0]`` is the highest-probable\n sample.\n - ``\"log_prob\"`` is a :tensor:`Tensor` of shape\n ``[batch_size, beam_width]`` containing the log probability\n of each sequence sample.\n \"\"\"\n\n if memory is not None:\n if memory_attention_bias is None:\n if memory_sequence_length is None:\n raise ValueError(\n \"`memory_sequence_length` is required if \"\n \"`memory_attention_bias` is not given.\")\n\n enc_padding = 1 - sequence_mask(\n memory_sequence_length, memory.size(1),\n dtype=torch.float32)\n memory_attention_bias = attn.attention_bias_ignore_padding(\n enc_padding)\n\n # record the context, which will be used in step function\n # for dynamic_decode\n if context is not None:\n if context_sequence_length is None:\n raise ValueError(\"'context_sequence_length' must not be None\"\n \"when 'context' is specified.\")\n self._state_context = context[:, 1:]\n self._state_context_sequence_length = context_sequence_length - 1\n else:\n self._state_context = None\n self._state_context_sequence_length = None\n\n # Faster code path for teacher-forcing training\n if (helper is None and beam_width is None and\n decoding_strategy == 'train_greedy'):\n if inputs is None:\n raise ValueError(\"'input' must not be none \"\n \"when using 'train_greedy' decoding strategy.\")\n times = torch.arange(\n inputs.size(1), dtype=torch.long, device=inputs.device)\n times = times.unsqueeze(0).expand(inputs.size(0), -1)\n inputs = self.embed_tokens(inputs, times)\n if sequence_length is not None:\n inputs = mask_sequences(inputs, sequence_length)\n\n decoder_self_attention_bias = (\n attn.attention_bias_lower_triangle(inputs.size(1)))\n\n decoder_output = self._self_attention_stack(\n inputs, memory, decoder_self_attention_bias,\n memory_attention_bias, cache=None)\n logits = self._output_layer(decoder_output)\n sample_id = torch.argmax(logits, dim=-1)\n\n return TransformerDecoderOutput(logits, sample_id)\n\n # Inference code path.\n if max_decoding_length is None:\n max_decoding_length = self._hparams.max_decoding_length\n\n self._state_max_decoding_length = max_decoding_length\n\n if beam_width is None or beam_width == 1: # Inference-like decoding\n # Prepare helper\n if helper is None:\n kwargs.update(decoding_strategy=decoding_strategy)\n if context is not None:\n kwargs.update(start_tokens=context[:, 0])\n helper = self._create_or_get_helper(infer_mode, **kwargs)\n assert isinstance(helper, EmbeddingHelper)\n\n self._state_cache = self._init_cache(\n memory, memory_attention_bias,\n beam_search_decoding=False, batch_size=helper.batch_size)\n if context is not None:\n assert self._state_context is not None\n pad_length = max_decoding_length - self._state_context.size(1)\n if pad_length > 0:\n self._state_context = torch.cat((\n self._state_context,\n self._state_context.new_zeros(\n self._state_context.size(0), pad_length)\n ), dim=1)\n\n outputs, cache, sequence_lengths = self.dynamic_decode(\n helper, inputs=None, sequence_length=None,\n initial_state=None, max_decoding_length=max_decoding_length,\n impute_finished=impute_finished)\n del cache # not used\n\n if context is not None:\n # Here the length of sample_id will be larger than that\n # of logit by 1, because there will be a additional\n # start_token in the returned sample_id.\n # the start_id should be the first token of the\n # given context\n start_tokens = context[:, 0]\n outputs = TransformerDecoderOutput(\n logits=outputs.logits,\n sample_id=torch.cat([\n start_tokens.unsqueeze(1),\n outputs.sample_id\n ], dim=1))\n 
sequence_lengths = sequence_lengths + 1\n\n return outputs, sequence_lengths\n\n else: # Beam-search decoding\n # Ignore `decoding_strategy` and # assume `helper` is not set.\n if helper is not None:\n raise ValueError(\"Must not set 'beam_width' and 'helper' \"\n \"simultaneously.\")\n if context is not None:\n start_tokens = context[:, 0]\n else:\n if 'start_tokens' not in kwargs:\n raise ValueError(\n \"'start_tokens' must be specified when using\"\n \"beam search decoding.\")\n start_tokens = kwargs['start_tokens']\n _batch_size = start_tokens.size(0)\n self._state_cache = self._init_cache(\n memory, memory_attention_bias,\n beam_search_decoding=True,\n batch_size=_batch_size)\n end_token: int = kwargs.get('end_token') # type: ignore\n\n # The output format is different when running beam search.\n sample_id, log_prob = self.beam_decode(\n start_tokens,\n end_token,\n embedding_fn=self.embed_tokens,\n beam_width=beam_width,\n length_penalty=length_penalty,\n decode_length=max_decoding_length)\n\n return {\n 'sample_id': sample_id,\n 'log_prob': log_prob\n }\n\n def _self_attention_stack(\n self, inputs: torch.Tensor,\n memory: Optional[torch.Tensor],\n decoder_self_attention_bias: Optional[torch.Tensor] = None,\n memory_attention_bias: Optional[torch.Tensor] = None,\n cache: Optional[Cache] = None) -> torch.Tensor:\n r\"\"\"Forward through the stacked multi-head attentions.\n \"\"\"\n inputs = self.embed_dropout(inputs)\n if cache is not None:\n if memory is not None:\n memory_attention_bias = cache['memory_attention_bias']\n else:\n assert decoder_self_attention_bias is not None\n\n x = inputs\n for i in range(self._hparams.num_blocks):\n layer_cache = cache['layers'][i] if cache is not None else None\n\n selfatt_output = self.self_attns[i](\n queries=self.self_attn_layer_norm[i](x),\n memory=None,\n memory_attention_bias=decoder_self_attention_bias,\n cache=layer_cache)\n x = x + self.residual_dropout(selfatt_output)\n\n if memory is not None:\n encdec_output = self.enc_dec_attns[i](\n queries=self.end_dec_attn_layer_norm[i](x),\n memory=memory,\n memory_attention_bias=memory_attention_bias)\n x = x + self.residual_dropout(encdec_output)\n\n sub_output = self.poswise_networks[i](self.poswise_layer_norm[i](x))\n x = x + self.residual_dropout(sub_output)\n\n return self.final_layer_norm(x)\n\n def _init_cache(self, memory: Optional[torch.Tensor],\n memory_attention_bias: Optional[torch.Tensor],\n beam_search_decoding: bool,\n batch_size: int) -> Cache:\n r\"\"\"Returns an initialized cache.\n\n In order to support both inference-like decoding and beam-search\n decoding, the elements of each layer must be initialized and extended\n as different structure respectively. 
Specifically, for inference-like\n decoding, a simple list is used; for beam-search decoding, a\n :tensor:`Tensor` of shape ``[batch_size, current_steps, num_units]``\n is maintained, where ``current_steps`` is the number of steps currently\n decoded.\n \"\"\"\n\n device = next(self.parameters()).device\n\n def _create_ta():\n return []\n\n def _create_empty_tensor():\n ret = torch.zeros(\n batch_size, 0, self._hparams.multihead_attention.num_units,\n dtype=torch.float, device=device)\n return ret\n\n _create_fn = (_create_empty_tensor if beam_search_decoding\n else _create_ta)\n\n cache: Cache = {\n 'memory': memory,\n 'memory_attention_bias': memory_attention_bias,\n 'layers': [{\n 'keys': _create_fn(),\n 'values': _create_fn(),\n } for _ in range(self._hparams.num_blocks)],\n }\n\n return cache\n\n def beam_decode(self, start_tokens: torch.LongTensor, end_token: int,\n embedding_fn: Callable[\n [torch.LongTensor, torch.LongTensor], torch.Tensor],\n decode_length: int = 256, beam_width: int = 5,\n length_penalty: float = 0.6) \\\n -> Tuple[torch.Tensor, torch.Tensor]:\n\n def _symbols_to_logits_fn(ids, cache):\n batch_size = ids.size(0)\n step = ids.size(-1) - 1\n times = ids.new_full((batch_size,), step)\n inputs = embedding_fn(ids[:, -1], times)\n return self._inputs_to_outputs(inputs, cache)\n\n assert self._vocab_size is not None\n\n outputs, log_prob = beam_search(\n _symbols_to_logits_fn,\n start_tokens,\n beam_width,\n decode_length,\n self._vocab_size,\n length_penalty,\n states=self._state_cache,\n eos_id=end_token)\n\n # Ignores <BOS>\n outputs = outputs[:, :, 1:]\n # shape = [batch_size, seq_length, beam_width]\n outputs = outputs.permute(0, 2, 1)\n return outputs, log_prob\n\n @property\n def output_size(self) -> int:\n r\"\"\"Output size of one step.\n \"\"\"\n return self._input_size\n\n def initialize(self, helper: Helper, inputs: Optional[torch.Tensor],\n sequence_length: Optional[torch.LongTensor],\n initial_state: Optional[Cache]) \\\n -> Tuple[torch.ByteTensor, torch.Tensor, Cache]:\n initial_finished, initial_inputs = helper.initialize(\n self.embed_tokens, inputs, sequence_length)\n state = initial_state or self._state_cache\n return initial_finished, initial_inputs, state\n\n def step(self, helper: Helper, time: int, inputs: torch.Tensor,\n state: Optional[Cache]) -> \\\n Tuple[TransformerDecoderOutput, Cache]:\n assert state is not None\n outputs, state = self._inputs_to_outputs(inputs, state)\n sample_ids = helper.sample(time=time, outputs=outputs)\n if self._state_context is not None:\n assert self._state_context_sequence_length is not None\n sample_ids = torch.where(\n self._state_context_sequence_length > time,\n self._state_context[:, time],\n sample_ids)\n\n next_state = state\n outputs = TransformerDecoderOutput(\n logits=outputs,\n sample_id=sample_ids)\n return outputs, next_state\n\n def next_inputs(self, helper: Helper, time: int,\n outputs: TransformerDecoderOutput) -> \\\n Tuple[torch.Tensor, torch.ByteTensor]:\n finished, next_inputs = helper.next_inputs(\n self.embed_tokens, time, outputs.logits, outputs.sample_id)\n return next_inputs, finished\n\n def finalize(self, # type: ignore\n outputs: TransformerDecoderOutput,\n final_state: Optional[Cache],\n sequence_lengths: torch.LongTensor) \\\n -> Tuple[TransformerDecoderOutput, Optional[Cache]]:\n # Clear state variables at end of decoding.\n del self._state_max_decoding_length\n del self._state_context\n del self._state_context_sequence_length\n del self._state_cache\n\n return 
super().finalize(outputs, final_state, sequence_lengths)\n" ]
[ [ "torch.nn.functional.embedding", "torch.randint", "torch.nn.LSTM", "numpy.issubdtype", "torch.is_tensor", "torch.tensor", "numpy.testing.assert_array_equal", "numpy.random.randint", "torch.rand", "numpy.testing.assert_allclose", "torch.argmax" ], [ "torch.exp", "torch.no_grad", "torch.cuda.is_available", "torch.load" ], [ "torch.Size", "torch.randint", "torch.zeros" ], [ "torch.nn.Dropout", "torch.zeros", "torch.nn.ModuleList", "torch.nn.LayerNorm", "torch.where", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
artemkurylev/Context-Aware_Crowd_Counting-pytorch
[ "d68ddd87b99f2afc512357cb8fcb0ca41ea22865" ]
[ "train.py" ]
[ "import numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nimport os\nimport visdom\nimport random\nfrom tqdm import tqdm as tqdm\n\nfrom cannet import CANNet\nfrom my_dataset import CrowdDataset\n\nif __name__==\"__main__\":\n # configuration\n train_image_root='./data/Shanghai_part_A/train_data/images'\n train_dmap_root='./data/Shanghai_part_A/train_data/ground_truth'\n test_image_root='./data/Shanghai_part_A/test_data/images'\n test_dmap_root='./data/Shanghai_part_A/test_data/ground_truth'\n gpu_or_cpu='cuda' # use cuda or cpu\n lr = 1e-7\n batch_size = 1\n momentum = 0.95\n epochs = 20000\n steps = [-1,1,100,150]\n scales = [1,1,1,1]\n workers = 4\n seed = time.time()\n print_freq = 30 \n \n vis=visdom.Visdom()\n device=torch.device(gpu_or_cpu)\n torch.cuda.manual_seed(seed)\n model=CANNet().to(device)\n criterion=nn.MSELoss(size_average=False).to(device)\n optimizer=torch.optim.SGD(model.parameters(),lr,\n momentum=momentum,\n weight_decay=0)\n# optimizer=torch.optim.Adam(model.parameters(),lr)\n train_dataset=CrowdDataset(train_image_root,train_dmap_root,gt_downsample=8,phase='train')\n train_loader=torch.utils.data.DataLoader(train_dataset,batch_size=1,shuffle=True)\n test_dataset=CrowdDataset(test_image_root,test_dmap_root,gt_downsample=8,phase='test')\n test_loader=torch.utils.data.DataLoader(test_dataset,batch_size=1,shuffle=False)\n \n if not os.path.exists('./checkpoints'):\n os.mkdir('./checkpoints')\n min_mae=10000\n min_epoch=0\n train_loss_list=[]\n epoch_list=[]\n test_error_list=[]\n for epoch in range(0,epochs):\n # training phase\n model.train()\n epoch_loss=0\n for i,(img,gt_dmap) in enumerate(tqdm(train_loader)):\n img=img.to(device)\n gt_dmap=gt_dmap.to(device)\n # forward propagation\n et_dmap=model(img)\n # calculate loss\n loss=criterion(et_dmap,gt_dmap)\n epoch_loss+=loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n# print(\"epoch:\",epoch,\"loss:\",epoch_loss/len(dataloader))\n epoch_list.append(epoch)\n train_loss_list.append(epoch_loss/len(train_loader))\n torch.save(model.state_dict(),'./checkpoints/epoch_'+str(epoch)+\".pth\")\n \n # testing phase\n model.eval()\n mae=0\n for i,(img,gt_dmap) in enumerate(tqdm(test_loader)):\n img=img.to(device)\n gt_dmap=gt_dmap.to(device)\n # forward propagation\n et_dmap=model(img)\n mae+=abs(et_dmap.data.sum()-gt_dmap.data.sum()).item()\n del img,gt_dmap,et_dmap\n if mae/len(test_loader)<min_mae:\n min_mae=mae/len(test_loader)\n min_epoch=epoch\n test_error_list.append(mae/len(test_loader))\n print(\"epoch:\"+str(epoch)+\" error:\"+str(mae/len(test_loader))+\" min_mae:\"+str(min_mae)+\" min_epoch:\"+str(min_epoch))\n vis.line(win=1,X=epoch_list, Y=train_loss_list, opts=dict(title='train_loss'))\n vis.line(win=2,X=epoch_list, Y=test_error_list, opts=dict(title='test_error'))\n # show an image\n index=random.randint(0,len(test_loader)-1)\n img,gt_dmap=test_dataset[index]\n vis.image(win=3,img=img,opts=dict(title='img'))\n vis.image(win=4,img=gt_dmap/(gt_dmap.max())*255,opts=dict(title='gt_dmap('+str(gt_dmap.sum())+')'))\n img=img.unsqueeze(0).to(device)\n gt_dmap=gt_dmap.unsqueeze(0)\n et_dmap=model(img)\n et_dmap=et_dmap.squeeze(0).detach().cpu().numpy()\n vis.image(win=5,img=et_dmap/(et_dmap.max())*255,opts=dict(title='et_dmap('+str(et_dmap.sum())+')'))\n \n import time\n print(time.strftime('%Y.%m.%d %H:%M:%S',time.localtime(time.time())))\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n " ]
[ [ "torch.device", "torch.utils.data.DataLoader", "torch.nn.MSELoss", "torch.cuda.manual_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
subhadip7879/neural-net
[ "04aacf7cdec89ea0f58f2c7397c72adefa8c2d4e" ]
[ "image-classifier/image-classifier.py" ]
[ "from IPython.display import Image\n\n\nImage('images/02_network_flowchart.png')\nImage('images/02_convolution.png')\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport time\nfrom datetime import timedelta\nimport math\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ntf.__version__\n#Convolutional Layer 1.\n# will connect each neuron to only a local region of the input volume \n# Convolution filters are 5 x 5 pixels.\nfilter_size1 = 5 \nnum_filters1 = 16 \n\n\n# Convolutional Layer 2.\nfilter_size2 = 5 \nnum_filters2 = 36 \n\n# Fully-connected layer.\nfc_size = 128 \n\n\ndata = input_data.read_data_sets('data/MNIST/', one_hot=True)\n\nprint(\"Size of:\")\nprint(\"- Training-set:\\t\\t{}\".format(len(data.train.labels)))\nprint(\"- Test-set:\\t\\t{}\".format(len(data.test.labels)))\nprint(\"- Validation-set:\\t{}\".format(len(data.validation.labels)))\n\ndata.test.cls = np.argmax(data.test.labels, axis=1)\n\nimg_size = 28\n# Images are stored in 1d array of this length.\nimg_size_flat = img_size * img_size\nimg_shape = (img_size, img_size)\nnum_channels = 1\nnum_classes = 10\n\ndef plot_images(images, cls_true, cls_pred=None):\n assert len(images) == 9 \n len(cls_true) == 9\n fig, axes = plt.subplots(3, 3)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n for i, ax in enumerate(axes.flat):\n # Plot image.\n ax.imshow(images[i].reshape(img_shape), cmap='binary')\n \n if cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(cls_true[i], cls_pred[i])\n \n ax.set_xlabel(xlabel)\n \n ax.set_xticks([])\n ax.set_yticks([])\n plt.show()\n \n\n# first images from mnist\nimages = data.test.images[0:9]\ncls_true = data.test.cls[0:9]\n# Plot the images and labels\nplot_images(images=images, cls_true=cls_true)\n\ndef new_weights(shape):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.05))\n\ndef new_biases(length):\n return tf.Variable(tf.constant(0.05, shape=[length]))\n\ndef new_conv_layer(input,num_input_channels, filter_size,num_filters,use_pooling=True):\n \n shape = [filter_size, filter_size, num_input_channels, num_filters]\n weights = new_weights(shape=shape)\n biases = new_biases(length=num_filters)\n layer = tf.nn.conv2d(input=input,\n filter=weights,\n strides=[1, 1, 1, 1],\n padding='SAME')\n\n \n layer += biases\n \n if use_pooling:\n layer = tf.nn.max_pool(value=layer,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n layer = tf.nn.relu(layer)\n return layer, weights\n\ndef flatten_layer(layer):\n layer_shape = layer.get_shape()\n num_features = layer_shape[1:4].num_elements()\n layer_flat = tf.reshape(layer, [-1, num_features])\n return layer_flat, num_features\n\n\ndef new_fc_layer(input, num_inputs,num_outputs,use_relu=True):\n weights = new_weights(shape=[num_inputs, num_outputs])\n biases = new_biases(length=num_outputs)\n layer = tf.matmul(input, weights) + biases\n \n if use_relu:\n layer = tf.nn.relu(layer)\n\n return layer\n\nx = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')\nx_image = tf.reshape(x, [-1, img_size, img_size, num_channels])\ny_true = tf.placeholder(tf.float32, shape=[None, 10], name='y_true')\ny_true_cls = tf.argmax(y_true, dimension=1)\n\n\nlayer_conv1, weights_conv1 = \\\n new_conv_layer(input=x_image,\n num_input_channels=num_channels,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=True)\nlayer_conv1\n\nlayer_conv2, weights_conv2 = \\\n 
new_conv_layer(input=layer_conv1,\n num_input_channels=num_filters1,\n filter_size=filter_size2,\n num_filters=num_filters2,\n use_pooling=True)\nlayer_conv2\n\nlayer_flat, num_features = flatten_layer(layer_conv2)\nlayer_flat\n\nnum_features\n\n\nlayer_fc1 = new_fc_layer(input=layer_flat,\n num_inputs=num_features,\n num_outputs=fc_size,\n use_relu=True)\nlayer_fc1\n\nlayer_fc2 = new_fc_layer(input=layer_fc1,\n num_inputs=fc_size,\n num_outputs=num_classes,\n use_relu=False)\nlayer_fc2\n\ny_pred = tf.nn.softmax(layer_fc2)\ny_pred_cls = tf.argmax(y_pred, dimension=1)\n\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,\n labels=y_true)\ncost = tf.reduce_mean(cross_entropy)\n\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)\ncorrect_prediction = tf.equal(y_pred_cls, y_true_cls)\n\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\ntrain_batch_size = 64\ntotal_iterations = 0\n\ndef optimize(num_iterations):\n global total_iterations\n start_time = time.time()\n for i in range(total_iterations, total_iterations + num_iterations):\n x_batch, y_true_batch = data.train.next_batch(train_batch_size)\n feed_dict_train = {x: x_batch, y_true: y_true_batch}\n session.run(optimizer, feed_dict=feed_dict_train)\n\n if i % 100 == 0:\n acc = session.run(accuracy, feed_dict=feed_dict_train)\n msg = \"Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}\"\n print(msg.format(i + 1, acc))\n \n total_iterations += num_iterations\n end_time = time.time()\n time_dif = end_time - start_time\n print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif)))))\n \ndef plot_example_errors(cls_pred, correct):\n incorrect = (correct == False)\n images = data.test.images[incorrect]\n cls_pred = cls_pred[incorrect]\n cls_true = data.test.cls[incorrect]\n plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9])\n \ndef plot_confusion_matrix(cls_pred):\n cls_true = data.test.cls\n cm = confusion_matrix(y_true=cls_true, y_pred=cls_pred)\n print(cm)\n plt.matshow(cm)\n\n \n plt.colorbar()\n tick_marks = np.arange(num_classes)\n plt.xticks(tick_marks, range(num_classes))\n plt.yticks(tick_marks, range(num_classes))\n plt.xlabel('Predicted')\n plt.ylabel('True')\n plt.show()\n\ntest_batch_size = 256\ndef print_test_accuracy(show_example_errors=False, show_confusion_matrix=False):\n num_test = len(data.test.images)\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n \n i = 0\n\n while i < num_test:\n j = min(i + test_batch_size, num_test)\n images = data.test.images[i:j, :]\n labels = data.test.labels[i:j, :]\n feed_dict = {x: images, y_true: labels}\n cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)\n i = j\n \n cls_true = data.test.cls\n correct = (cls_true == cls_pred)\n correct_sum = correct.sum()\n acc = float(correct_sum) / num_test\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\n print(msg.format(acc, correct_sum, num_test))\n if show_example_errors:\n print(\"Example errors:\")\n plot_example_errors(cls_pred=cls_pred, correct=correct)\n \n if show_confusion_matrix:\n print(\"Confusion Matrix:\")\n plot_confusion_matrix(cls_pred=cls_pred)\n \nprint_test_accuracy()\n\noptimize(num_iterations=1)\nprint_test_accuracy()\n\noptimize(num_iterations=99) 
\nprint_test_accuracy(show_example_errors=True)\n\noptimize(num_iterations=900)\nprint_test_accuracy(show_example_errors=True)\n\noptimize(num_iterations=9000)\nprint_test_accuracy(show_example_errors=True, show_confusion_matrix=True)\n\ndef plot_conv_weights(weights, input_channel=0):\n w = session.run(weights)\n w_min = np.min(w)\n w_max = np.max(w)\n num_filters = w.shape[3]\n num_grids = math.ceil(math.sqrt(num_filters))\n fig, axes = plt.subplots(num_grids, num_grids)\n for i, ax in enumerate(axes.flat):\n if i<num_filters:\n img = w[:, :, input_channel, i]\n ax.imshow(img, vmin=w_min, vmax=w_max,\n interpolation='nearest', cmap='seismic')\n \n ax.set_xticks([])\n ax.set_yticks([])\n plt.show()\n\ndef plot_conv_layer(layer, image):\n feed_dict = {x: [image]}\n values = session.run(layer, feed_dict=feed_dict)\n num_filters = values.shape[3]\n num_grids = math.ceil(math.sqrt(num_filters))\n fig, axes = plt.subplots(num_grids, num_grids)\n for i, ax in enumerate(axes.flat):\n if i<num_filters:\n img = values[0, :, :, i]\n\n ax.imshow(img, interpolation='nearest', cmap='binary')\n \n ax.set_xticks([])\n ax.set_yticks([])\n \n plt.show()\n \ndef plot_image(image):\n plt.imshow(image.reshape(img_shape),\n interpolation='nearest',\n cmap='binary')\n\n plt.show()\n \n\nimage1 = data.test.images[0]\nplot_image(image1)\n\nimage2 = data.test.images[13]\nplot_image(image2)\n\nplot_conv_weights(weights=weights_conv1)\nplot_conv_layer(layer=layer_conv1, image=image1)\nplot_conv_layer(layer=layer_conv1, image=image2)\nplot_conv_weights(weights=weights_conv2, input_channel=0)\nplot_conv_weights(weights=weights_conv2, input_channel=1)\nplot_conv_layer(layer=layer_conv2, image=image1)\nplot_conv_layer(layer=layer_conv2, image=image2)\n\n\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.max_pool", "tensorflow.equal", "tensorflow.cast", "sklearn.metrics.confusion_matrix", "numpy.max", "tensorflow.train.AdamOptimizer", "matplotlib.pyplot.matshow", "tensorflow.nn.conv2d", "numpy.arange", "numpy.argmax", "tensorflow.Session", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "numpy.zeros", "tensorflow.matmul", "tensorflow.truncated_normal", "numpy.min", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "tensorflow.nn.relu", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
tiancity-NJU/REID
[ "125a520a9c0b94440a7757e6f3c3c8bf976906ec", "125a520a9c0b94440a7757e6f3c3c8bf976906ec" ]
[ "script/bfe.py", "models/BFE.py" ]
[ "# encoding: utf-8\nimport os\nimport sys\nfrom os import path as osp\nfrom pprint import pprint\n\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\n\nsys.path.insert(0,os.path.abspath(os.path.dirname(__file__)+os.sep+'..'))\n\nfrom config import opt\nfrom datasets import data_manager\nfrom datasets.data_loader import ImageData\nfrom datasets.samplers import RandomIdentitySampler\nfrom models.networks import ResNetBuilder, IDE, Resnet, BFE\n#from models.BFE import BFE\nfrom trainers.evaluator import ResNetEvaluator\nfrom trainers.trainer import cls_tripletTrainer\nfrom utils.loss import CrossEntropyLabelSmooth, TripletLoss, Margin\nfrom utils.LiftedStructure import LiftedStructureLoss\nfrom utils.DistWeightDevianceLoss import DistWeightBinDevianceLoss\nfrom utils.serialization import Logger, save_checkpoint\nfrom utils.transforms import TestTransform, TrainTransform\n\n\ndef train(**kwargs):\n opt._parse(kwargs)\n opt.model_name = 'bfe'\n # set random seed and cudnn benchmark\n torch.manual_seed(opt.seed)\n os.makedirs(opt.save_dir, exist_ok=True)\n use_gpu = torch.cuda.is_available()\n sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))\n\n print('=========user config==========')\n pprint(opt._state_dict())\n print('============end===============')\n\n if use_gpu:\n print('currently using GPU')\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(opt.seed)\n else:\n print('currently using cpu')\n\n print('initializing dataset {}'.format(opt.dataset))\n dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)\n\n pin_memory = True if use_gpu else False\n\n summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))\n\n trainloader = DataLoader(\n ImageData(dataset.train, TrainTransform(opt.datatype)),\n sampler=RandomIdentitySampler(dataset.train, opt.num_instances),\n batch_size=opt.train_batch, num_workers=opt.workers,\n pin_memory=pin_memory, drop_last=True\n )\n\n queryloader = DataLoader(\n ImageData(dataset.query, TestTransform(opt.datatype)),\n batch_size=opt.test_batch, num_workers=opt.workers,\n pin_memory=pin_memory\n )\n\n galleryloader = DataLoader(\n ImageData(dataset.gallery, TestTransform(opt.datatype)),\n batch_size=opt.test_batch, num_workers=opt.workers,\n pin_memory=pin_memory\n )\n queryFliploader = DataLoader(\n ImageData(dataset.query, TestTransform(opt.datatype, True)),\n batch_size=opt.test_batch, num_workers=opt.workers,\n pin_memory=pin_memory\n )\n\n galleryFliploader = DataLoader(\n ImageData(dataset.gallery, TestTransform(opt.datatype, True)),\n batch_size=opt.test_batch, num_workers=opt.workers,\n pin_memory=pin_memory\n )\n\n print('initializing model ...')\n\n\n model = BFE(dataset.num_train_pids, 1.0, 0.33)\n\n\n optim_policy = model.get_optim_policy()\n\n if opt.pretrained_model:\n state_dict = torch.load(opt.pretrained_model)['state_dict']\n # state_dict = {k: v for k, v in state_dict.items() \\\n # if not ('reduction' in k or 'softmax' in k)}\n model.load_state_dict(state_dict, False)\n print('load pretrained model ' + opt.pretrained_model)\n print('model size: {:.5f}M'.format(sum(p.numel() for p in model.parameters()) / 1e6))\n\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n reid_evaluator = ResNetEvaluator(model)\n\n if opt.evaluate:\n reid_evaluator.evaluate(queryloader, galleryloader,\n queryFliploader, galleryFliploader, re_ranking=opt.re_ranking, savefig=opt.savefig)\n return\n\n # 
xent_criterion = nn.CrossEntropyLoss()\n xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)\n\n if opt.loss == 'triplet':\n embedding_criterion = TripletLoss(opt.margin)\n elif opt.loss == 'lifted':\n embedding_criterion = LiftedStructureLoss(hard_mining=True)\n elif opt.loss == 'weight':\n embedding_criterion = Margin()\n\n def criterion(triplet_y, softmax_y, labels):\n losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \\\n [xent_criterion(output, labels) for output in softmax_y]\n loss = sum(losses)\n return loss\n\n # get optimizer\n if opt.optim == \"sgd\":\n optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9, weight_decay=opt.weight_decay)\n else:\n optimizer = torch.optim.Adam(optim_policy, lr=opt.lr, weight_decay=opt.weight_decay)\n\n start_epoch = opt.start_epoch\n # get trainer and evaluator\n reid_trainer = cls_tripletTrainer(opt, model, optimizer, criterion, summary_writer)\n\n def adjust_lr(optimizer, ep):\n if ep < 50:\n lr = 1e-4 * (ep // 5 + 1)\n elif ep < 200:\n lr = 1e-3\n elif ep < 300:\n lr = 1e-4\n else:\n lr = 1e-5\n for p in optimizer.param_groups:\n p['lr'] = lr\n\n # start training\n best_rank1 = opt.best_rank\n best_epoch = 0\n for epoch in range(start_epoch, opt.max_epoch):\n if opt.adjust_lr:\n adjust_lr(optimizer, epoch + 1)\n reid_trainer.train(epoch, trainloader)\n\n # skip if not save model\n if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (epoch + 1) == opt.max_epoch:\n if opt.mode == 'class':\n rank1 = test(model, queryloader)\n else:\n rank1 = reid_evaluator.evaluate(queryloader, galleryloader, queryFliploader, galleryFliploader)\n is_best = rank1 > best_rank1\n if is_best:\n best_rank1 = rank1\n best_epoch = epoch + 1\n\n if use_gpu:\n state_dict = model.module.state_dict()\n else:\n state_dict = model.state_dict()\n save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},\n is_best=is_best, save_dir=opt.save_dir,\n filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')\n\n print('Best rank-1 {:.1%}, achived at epoch {}'.format(best_rank1, best_epoch))\n\n\ndef test(model, queryloader):\n model.eval()\n correct = 0\n with torch.no_grad():\n for data, target, _ in queryloader:\n output = model(data).cpu()\n # get the index of the max log-probability\n pred = output.max(1, keepdim=True)[1]\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n rank1 = 100. 
* correct / len(queryloader.dataset)\n print('\\nTest set: Accuracy: {}/{} ({:.2f}%)\\n'.format(correct, len(queryloader.dataset), rank1))\n return rank1\n\n\nif __name__ == '__main__':\n import fire\n\n fire.Fire()\n", "# encoding: utf-8\nimport copy\nimport itertools\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nimport random\nfrom scipy.spatial.distance import cdist\nfrom sklearn.preprocessing import normalize\nfrom torch import nn, optim\nfrom torch.utils.data import dataloader\nfrom torchvision import transforms\nfrom torchvision.models.resnet import Bottleneck, resnet50\nfrom torchvision.transforms import functional\n\nfrom .resnet import ResNet\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('Conv') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('BatchNorm') != -1:\n if m.affine:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.constant_(m.bias, 0.0)\n\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.normal_(m.weight, std=0.001)\n if len(m.bias):\n nn.init.constant_(m.bias, 0.0)\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y\n\n\nclass BatchDrop(nn.Module):\n def __init__(self, h_ratio, w_ratio):\n super(BatchDrop, self).__init__()\n self.h_ratio = h_ratio\n self.w_ratio = w_ratio\n\n def forward(self, x):\n if self.training:\n h, w = x.size()[-2:]\n rh = round(self.h_ratio * h)\n rw = round(self.w_ratio * w)\n sx = random.randint(0, h - rh)\n sy = random.randint(0, w - rw)\n mask = x.new_ones(x.size())\n mask[:, :, sx:sx + rh, sy:sy + rw] = 0\n x = x * mask\n return x\n\n\nclass BatchCrop(nn.Module):\n def __init__(self, ratio):\n super(BatchCrop, self).__init__()\n self.ratio = ratio\n\n def forward(self, x):\n if self.training:\n h, w = x.size()[-2:]\n rw = int(self.ratio * w)\n start = random.randint(0, h - 1)\n if start + rw > h:\n select = list(range(0, start + rw - h)) + list(range(start, h))\n else:\n select = list(range(start, start + rw))\n mask = x.new_zeros(x.size())\n mask[:, :, select, :] = 1\n x = x * mask\n return x\n\n\n\n\nclass BFE(nn.Module):\n def __init__(self, num_classes, width_ratio=0.5, height_ratio=0.5):\n super(BFE, self).__init__()\n resnet = resnet50(pretrained=True)\n self.backbone = nn.Sequential(\n resnet.conv1,\n resnet.bn1,\n resnet.relu,\n resnet.maxpool,\n resnet.layer1, # res_conv2\n resnet.layer2, # res_conv3\n resnet.layer3, # res_conv4\n )\n self.res_part = nn.Sequential(\n Bottleneck(1024, 512, stride=1, downsample=nn.Sequential(\n nn.Conv2d(1024, 2048, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(2048),\n )),\n Bottleneck(2048, 512),\n Bottleneck(2048, 512),\n )\n self.res_part.load_state_dict(resnet.layer4.state_dict())\n reduction = nn.Sequential(\n nn.Conv2d(2048, 512, 1),\n nn.BatchNorm2d(512),\n nn.ReLU()\n )\n # global branch\n 
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.global_softmax = nn.Linear(512, num_classes)\n self.global_softmax.apply(weights_init_kaiming)\n self.global_reduction = copy.deepcopy(reduction)\n self.global_reduction.apply(weights_init_kaiming)\n\n\n self.global_conv_list=nn.ModuleList()\n for _ in range(6):\n self.global_conv_list.append(nn.Sequential(\n nn.Conv2d(2048, 256, 1),\n nn.BatchNorm2d(256),\n nn.ReLU()\n )\n )\n self.global_softmax_list=nn.ModuleList()\n for _ in range(6):\n self.global_softmax_list.append(nn.Sequential(\n nn.Linear(256, num_classes)\n )\n )\n\n for i in range(6):\n self.global_conv_list[i].apply(weights_init_kaiming)\n self.global_softmax_list[i].apply(weights_init_kaiming)\n\n\n # part branch\n self.res_part2 = Bottleneck(2048, 512)\n\n self.part_maxpool = nn.AdaptiveMaxPool2d((1, 1))\n self.batch_crop = BatchDrop(height_ratio, width_ratio)\n self.reduction = nn.Sequential(\n nn.Linear(2048, 1024, 1),\n nn.BatchNorm1d(1024),\n nn.ReLU()\n )\n self.reduction.apply(weights_init_kaiming)\n self.softmax = nn.Linear(1024, num_classes)\n self.softmax.apply(weights_init_kaiming)\n\n def forward(self, x):\n \"\"\"\n :param x: input image tensor of (N, C, H, W)\n :return: (prediction, triplet_losses, softmax_losses)\n \"\"\"\n x = self.backbone(x)\n x = self.res_part(x)\n\n predict = []\n triplet_features = []\n softmax_features = []\n\n global branch\n glob = self.global_avgpool(x)\n global_triplet_feature = self.global_reduction(glob).squeeze()\n global_softmax_class = self.global_softmax(global_triplet_feature)\n softmax_features.append(global_softmax_class)\n triplet_features.append(global_triplet_feature)\n predict.append(global_triplet_feature)\n\n # assert x.size(2) % 6 == 0\n # stripes_h = int(x.size(2) / 6)\n #\n # for i in range(6):\n # feat = F.avg_pool2d(x[:, :, i * stripes_h:(i + 1) * stripes_h, :], (stripes_h, x.size(-1)))\n # feat = self.global_conv_list[i](feat)\n # feat =feat.view(feat.size(0), -1)\n # triplet_features.append(feat)\n #\n # logit = self.global_softmax_list[i](feat)\n # softmax_features.append(logit)\n #\n #\n # tmp = torch.stack(triplet_features)\n # tmp = tmp.permute(1, 0, 2).contiguous()\n #\n # tmp = tmp.view(tmp.size(0), -1)\n # predict.append(tmp)\n\n\n # part branch\n\n x = self.res_part2(x)\n\n x = self.batch_crop(x)\n triplet_feature = self.part_maxpool(x).squeeze()\n feature = self.reduction(triplet_feature)\n softmax_feature = self.softmax(feature)\n triplet_features.append(feature)\n softmax_features.append(softmax_feature)\n predict.append(feature)\n\n if self.training:\n return triplet_features, softmax_features\n else:\n return torch.cat(predict, 1)\n\n def get_optim_policy(self):\n params = [\n {'params': self.backbone.parameters()},\n {'params': self.res_part.parameters()},\n {'params': self.global_reduction.parameters()},\n {'params': self.global_softmax.parameters()},\n {'params': self.res_part2.parameters()},\n {'params': self.reduction.parameters()},\n {'params': self.softmax.parameters()},\n ]\n return params" ]
[ [ "torch.optim.Adam", "torch.load", "torch.manual_seed", "torch.no_grad", "torch.cuda.is_available", "torch.optim.SGD", "torch.cuda.manual_seed_all", "torch.nn.DataParallel" ], [ "torch.nn.Sequential", "torch.nn.AdaptiveMaxPool2d", "torch.nn.BatchNorm1d", "torch.cat", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.normal_", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mathildebadoual/pandapower
[ "9ba4bcb78e84b644d2ba6df0c08e285c54af8ddc", "de004efc1b7432a633792af4f551f7635a02db47" ]
[ "pandapower/build_gen.py", "pandapower/test/conftest.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\r\n# and Energy System Technology (IEE), Kassel. All rights reserved.\r\n\r\n\r\nimport numpy as np\r\nimport numpy.core.numeric as ncn\r\nfrom numpy import array, zeros, isnan\r\nfrom pandas import DataFrame\r\nfrom pandapower.idx_bus import PV, REF, VA, VM, BUS_TYPE, NONE, VMAX, VMIN, PQ\r\nfrom pandapower.idx_gen import QMIN, QMAX, PMIN, PMAX, GEN_STATUS, GEN_BUS, PG, VG, QG\r\n\r\n\r\ndef _build_gen_ppc(net, ppc):\r\n '''\r\n Takes the empty ppc network and fills it with the gen values. The gen\r\n datatype will be float afterwards.\r\n\r\n **INPUT**:\r\n **net** -The pandapower format network\r\n\r\n **ppc** - The PYPOWER format network to fill in values\r\n '''\r\n\r\n mode = net[\"_options\"][\"mode\"]\r\n\r\n # if mode == power flow or short circuit...\r\n if mode == \"pf\" or mode == \"sc\":\r\n\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n eg_is_mask = _is_elements['ext_grid']\r\n gen_is_mask = _is_elements['gen']\r\n\r\n eg_end = np.sum(eg_is_mask)\r\n gen_end = eg_end + np.sum(gen_is_mask)\r\n xw_end = gen_end + len(net[\"xward\"])\r\n\r\n # define default q limits\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n p_lim_default = 1e9\r\n\r\n _init_ppc_gen(ppc, xw_end, 0)\r\n if mode == \"sc\":\r\n return\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n _build_pp_gen(net, ppc, gen_is_mask, eg_end, gen_end, q_lim_default, p_lim_default)\r\n\r\n _build_pp_ext_grid(net, ppc, eg_is_mask, eg_end)\r\n\r\n # add extended ward pv node data\r\n if xw_end > gen_end:\r\n _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default)\r\n\r\n # if mode == optimal power flow...\r\n if mode == \"opf\":\r\n\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n\r\n if len(net.dcline) > 0:\r\n ppc[\"dcline\"] = net.dcline[[\"loss_kw\", \"loss_percent\"]].values\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n eg_is = net[\"ext_grid\"][_is_elements['ext_grid']]\r\n gen_is = net[\"gen\"][_is_elements['gen']]\r\n sg_is = net.sgen[(net.sgen.in_service & net.sgen.controllable) == True] \\\r\n if \"controllable\" in net.sgen.columns else DataFrame()\r\n l_is = net.load[(net.load.in_service & net.load.controllable) == True] \\\r\n if \"controllable\" in net.load.columns else DataFrame()\r\n stor_is = net.storage[(net.storage.in_service & net.storage.controllable) == True] \\\r\n if \"controllable\" in net.storage.columns else DataFrame()\r\n\r\n _is_elements[\"sgen_controllable\"] = sg_is\r\n _is_elements[\"load_controllable\"] = l_is\r\n _is_elements[\"storage_controllable\"] = stor_is\r\n eg_end = len(eg_is)\r\n gen_end = eg_end + len(gen_is)\r\n sg_end = gen_end + len(sg_is)\r\n l_end = sg_end + len(l_is)\r\n stor_end = l_end + len(stor_is)\r\n\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n p_lim_default = 1e9 # changes must be considered in check_opf_data\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n # initialize generator matrix\r\n ppc[\"gen\"] = zeros(shape=(stor_end, 21), dtype=float)\r\n ppc[\"gen\"][:] = array([0, 0, 0, q_lim_default, -q_lim_default, 1., 1., 1, p_lim_default,\r\n -p_lim_default, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n\r\n # add sgens first so pv bus types won't be overwritten\r\n if sg_end > gen_end:\r\n gen_buses = 
bus_lookup[sg_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][gen_end:sg_end, GEN_BUS] = gen_buses\r\n ppc[\"gen\"][gen_end:sg_end, PG] = - sg_is[\"p_kw\"].values * 1e-3 * sg_is[\"scaling\"].values\r\n ppc[\"gen\"][gen_end:sg_end, QG] = sg_is[\"q_kvar\"].values * 1e-3 * sg_is[\"scaling\"].values\r\n\r\n # set bus values for generator buses\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable sgens\r\n if \"min_q_kvar\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, QMAX] = - (sg_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][gen_end:sg_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][gen_end:sg_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, QMIN] = - (sg_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][gen_end:sg_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][gen_end:sg_end, [QMAX]] = min_q_kvar - 1e-10 # TODO Why this? (M.Scharf, 2018-02)\r\n\r\n if \"max_p_kw\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, PMIN] = - (sg_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][gen_end:sg_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][gen_end:sg_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, PMAX] = - (sg_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][gen_end:sg_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][gen_end:sg_end, [PMAX]] = min_p_kw\r\n\r\n # add controllable loads\r\n if l_end > sg_end:\r\n load_buses = bus_lookup[l_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][sg_end:l_end, GEN_BUS] = load_buses\r\n ppc[\"gen\"][sg_end:l_end, PG] = - l_is[\"p_kw\"].values * 1e-3 * l_is[\"scaling\"].values\r\n ppc[\"gen\"][sg_end:l_end, QG] = l_is[\"q_kvar\"].values * 1e-3 * l_is[\"scaling\"].values\r\n\r\n # set bus values for controllable loads\r\n ppc[\"bus\"][load_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable loads\r\n if \"min_q_kvar\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, QMAX] = - (l_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][sg_end:l_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][sg_end:l_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, QMIN] = - (l_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][sg_end:l_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][sg_end:l_end, [QMAX]] = min_q_kvar\r\n\r\n if \"min_p_kw\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, PMIN] = - (l_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][sg_end:l_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][sg_end:l_end, [PMIN]] = max_p_kw\r\n\r\n if \"max_p_kw\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, PMAX] = - (l_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][sg_end:l_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][sg_end:l_end, [PMAX]] = min_p_kw\r\n\r\n # add controllable storages\r\n if stor_end > l_end:\r\n stor_buses = bus_lookup[stor_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][l_end:stor_end, GEN_BUS] = stor_buses\r\n ppc[\"gen\"][l_end:stor_end, 
PG] = - stor_is[\"p_kw\"].values * 1e-3 * stor_is[\"scaling\"].values\r\n ppc[\"gen\"][l_end:stor_end, QG] = stor_is[\"q_kvar\"].values * 1e-3 * stor_is[\"scaling\"].values\r\n\r\n # set bus values for generator buses\r\n ppc[\"bus\"][stor_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable sgens\r\n if \"min_q_kvar\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, QMAX] = - (stor_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][l_end:stor_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][l_end:stor_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, QMIN] = - (stor_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][l_end:stor_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][l_end:stor_end, [QMAX]] = min_q_kvar\r\n\r\n if \"max_p_kw\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, PMIN] = - (stor_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][l_end:stor_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][l_end:stor_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, PMAX] = - (stor_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][l_end:stor_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][l_end:stor_end, [PMAX]] = min_p_kw\r\n\r\n # add ext grid / slack data\r\n ppc[\"gen\"][:eg_end, GEN_BUS] = bus_lookup[eg_is[\"bus\"].values]\r\n ppc[\"gen\"][:eg_end, VG] = eg_is[\"vm_pu\"].values\r\n ppc[\"gen\"][:eg_end, GEN_STATUS] = eg_is[\"in_service\"].values\r\n if \"max_p_kw\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, PMIN] = - (eg_is[\"max_p_kw\"].values * 1e-3 - delta)\r\n max_p_kw = ppc[\"gen\"][:eg_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][:eg_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, PMAX] = - (eg_is[\"min_p_kw\"].values * 1e-3 + delta)\r\n min_p_kw = ppc[\"gen\"][:eg_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][:eg_end, [PMAX]] = min_p_kw\r\n\r\n if \"min_q_kvar\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, QMAX] = - (eg_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][:eg_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][:eg_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, QMIN] = - (eg_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][:eg_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][:eg_end, [QMAX]] = min_q_kvar - 1e-10\r\n\r\n # set bus values for external grid buses\r\n eg_buses = bus_lookup[eg_is[\"bus\"].values]\r\n if calculate_voltage_angles:\r\n ppc[\"bus\"][eg_buses, VA] = eg_is[\"va_degree\"].values\r\n ppc[\"bus\"][eg_buses, BUS_TYPE] = REF\r\n ppc[\"bus\"][eg_buses, VM] = eg_is[\"vm_pu\"].values\r\n\r\n # REF busses don't have flexible voltages by definition:\r\n ppc[\"bus\"][eg_buses, VMAX] = ppc[\"bus\"][ppc[\"bus\"][:, BUS_TYPE] == REF, VM]\r\n ppc[\"bus\"][eg_buses, VMIN] = ppc[\"bus\"][ppc[\"bus\"][:, BUS_TYPE] == REF, VM]\r\n\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n ppc[\"gen\"][eg_end:gen_end, GEN_BUS] = 
bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"gen\"][eg_end:gen_end, PG] = - gen_is[\"p_kw\"].values * 1e-3 * gen_is[\"scaling\"].values\r\n ppc[\"gen\"][eg_end:gen_end, VG] = gen_is[\"vm_pu\"].values\r\n\r\n # set bus values for generator buses\r\n gen_buses = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PV\r\n ppc[\"bus\"][gen_buses, VM] = gen_is[\"vm_pu\"].values\r\n\r\n # set constraints for PV generators\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, _is_elements['gen'])\r\n _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, _is_elements['gen'])\r\n\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default)\r\n\r\n\r\ndef _init_ppc_gen(ppc, xw_end, q_lim_default):\r\n # initialize generator matrix\r\n ppc[\"gen\"] = np.zeros(shape=(xw_end, 21), dtype=float)\r\n ppc[\"gen\"][:] = np.array([0, 0, 0, q_lim_default, -q_lim_default, 1.,\r\n 1., 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n\r\n\r\ndef _build_pp_ext_grid(net, ppc, eg_is_mask, eg_end):\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n # add ext grid / slack data\r\n eg_buses = bus_lookup[net[\"ext_grid\"][\"bus\"].values[eg_is_mask]]\r\n ppc[\"gen\"][:eg_end, GEN_BUS] = eg_buses\r\n ppc[\"gen\"][:eg_end, VG] = net[\"ext_grid\"][\"vm_pu\"].values[eg_is_mask]\r\n ppc[\"gen\"][:eg_end, GEN_STATUS] = True\r\n\r\n # set bus values for external grid buses\r\n if calculate_voltage_angles:\r\n ppc[\"bus\"][eg_buses, VA] = net[\"ext_grid\"][\"va_degree\"].values[eg_is_mask]\r\n ppc[\"bus\"][eg_buses, BUS_TYPE] = REF\r\n # _build_gen_lookups(net, \"ext_grid\", 0, eg_end)\r\n\r\n\r\ndef _build_pp_gen(net, ppc, gen_is_mask, eg_end, gen_end, q_lim_default, p_lim_default):\r\n\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\r\n\r\n gen_buses = bus_lookup[net[\"gen\"][\"bus\"].values[gen_is_mask]]\r\n gen_is_vm = net[\"gen\"][\"vm_pu\"].values[gen_is_mask]\r\n ppc[\"gen\"][eg_end:gen_end, GEN_BUS] = gen_buses\r\n ppc[\"gen\"][eg_end:gen_end, PG] = - (net[\"gen\"][\"p_kw\"].values[gen_is_mask] * 1e-3 *\r\n net[\"gen\"][\"scaling\"].values[gen_is_mask])\r\n ppc[\"gen\"][eg_end:gen_end, VG] = gen_is_vm\r\n\r\n # set bus values for generator buses\r\n\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PV\r\n ppc[\"bus\"][gen_buses, VM] = gen_is_vm\r\n\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n\r\n if copy_constraints_to_ppc:\r\n _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default)\r\n\r\n # _build_gen_lookups(net, \"gen\", eg_end, gen_end)\r\n\r\n\r\ndef _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default, update_lookup=True):\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n xw = net[\"xward\"]\r\n xw_is = net[\"_is_elements\"]['xward']\r\n if update_lookup:\r\n ppc[\"gen\"][gen_end:xw_end, GEN_BUS] = bus_lookup[xw[\"ad_bus\"].values]\r\n ppc[\"gen\"][gen_end:xw_end, VG] = xw[\"vm_pu\"].values\r\n ppc[\"gen\"][gen_end:xw_end, GEN_STATUS] = xw_is\r\n ppc[\"gen\"][gen_end:xw_end, QMIN] = -q_lim_default\r\n ppc[\"gen\"][gen_end:xw_end, QMAX] = q_lim_default\r\n\r\n xward_buses = bus_lookup[net[\"xward\"][\"ad_bus\"].values]\r\n 
ppc[\"bus\"][xward_buses[xw_is], BUS_TYPE] = PV\r\n ppc[\"bus\"][xward_buses[~xw_is], BUS_TYPE] = NONE\r\n ppc[\"bus\"][xward_buses, VM] = net[\"xward\"][\"vm_pu\"].values\r\n\r\n\r\n\r\n\r\ndef _update_gen_ppc(net, ppc):\r\n '''\r\n Takes the ppc network and updates the gen values from the values in net.\r\n\r\n **INPUT**:\r\n **net** -The pandapower format network\r\n\r\n **ppc** - The PYPOWER format network to fill in values\r\n '''\r\n # get options from net\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n gen_is_mask = _is_elements['gen']\r\n # TODO maybe speed up things here, too\r\n eg_is = net[\"ext_grid\"][_is_elements['ext_grid']]\r\n gen_is = net[\"gen\"][_is_elements['gen']]\r\n\r\n eg_end = len(eg_is)\r\n gen_end = eg_end + len(gen_is)\r\n xw_end = gen_end + len(net[\"xward\"])\r\n\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n\r\n # add ext grid / slack data\r\n ext_grid_lookup = net[\"_pd2ppc_lookups\"][\"ext_grid\"]\r\n ext_grid_idx_ppc = ext_grid_lookup[eg_is.index]\r\n ppc[\"gen\"][ext_grid_idx_ppc, VG] = eg_is[\"vm_pu\"].values\r\n ppc[\"gen\"][ext_grid_idx_ppc, GEN_STATUS] = eg_is[\"in_service\"].values\r\n\r\n # set bus values for external grid buses\r\n if calculate_voltage_angles:\r\n # eg_buses = bus_lookup[eg_is[\"bus\"].values]\r\n ppc[\"bus\"][ext_grid_idx_ppc, VA] = eg_is[\"va_degree\"].values\r\n\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n gen_lookup = net[\"_pd2ppc_lookups\"][\"gen\"]\r\n gen_idx_ppc = gen_lookup[gen_is.index]\r\n ppc[\"gen\"][gen_idx_ppc, PG] = - gen_is[\"p_kw\"].values * 1e-3 * gen_is[\"scaling\"].values\r\n ppc[\"gen\"][gen_idx_ppc, VG] = gen_is[\"vm_pu\"].values\r\n\r\n # set bus values for generator buses\r\n gen_buses = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"bus\"][gen_buses, VM] = gen_is[\"vm_pu\"].values\r\n\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n\r\n # add extended ward pv node data\r\n if xw_end > gen_end:\r\n # ToDo: this must be tested in combination with recycle. Maybe the placement of the updated value in ppc[\"gen\"]\r\n # ToDo: is wrong. 
-> I'll better raise en error\r\n raise NotImplementedError(\"xwards in combination with recycle is not properly implemented\")\r\n # _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default,\r\n # update_lookup=False)\r\n\r\n\r\ndef _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask):\r\n # Note: Pypower has generator reference system, pandapower uses load reference\r\n # system (max <-> min)\r\n\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n if \"max_q_kvar\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, QMIN] = -net[\"gen\"][\"max_q_kvar\"].values[gen_is_mask] * 1e-3 - delta\r\n if \"min_q_kvar\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, QMAX] = -net[\"gen\"][\"min_q_kvar\"].values[gen_is_mask] * 1e-3 + delta\r\n\r\n\r\ndef _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask):\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n if \"max_p_kw\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, PMIN] = -net[\"gen\"][\"max_p_kw\"].values[gen_is_mask] * 1e-3 + delta\r\n if \"min_p_kw\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, PMAX] = -net[\"gen\"][\"min_p_kw\"].values[gen_is_mask] * 1e-3 - delta\r\n\r\n\r\ndef _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default):\r\n # Note: Pypower has generator reference system, pandapower uses load reference system (max <-> min)\r\n max_q_kvar = ppc[\"gen\"][eg_end:gen_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=np.isnan(max_q_kvar))\r\n ppc[\"gen\"][eg_end:gen_end, [QMIN]] = max_q_kvar\r\n\r\n min_q_kvar = ppc[\"gen\"][eg_end:gen_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=np.isnan(min_q_kvar))\r\n ppc[\"gen\"][eg_end:gen_end, [QMAX]] = min_q_kvar\r\n\r\n\r\ndef _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default):\r\n # Note: Pypower has generator reference system, pandapower uses load reference system (max <-> min)\r\n max_p_kw = ppc[\"gen\"][eg_end:gen_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][eg_end:gen_end, [PMIN]] = max_p_kw\r\n\r\n min_p_kw = ppc[\"gen\"][eg_end:gen_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][eg_end:gen_end, [PMAX]] = min_p_kw\r\n\r\n\r\ndef _check_voltage_setpoints_at_same_bus(ppc):\r\n # generator buses:\r\n gen_bus = ppc['gen'][:, GEN_BUS].astype(int)\r\n # generator setpoints:\r\n gen_vm = ppc['gen'][:, VG]\r\n if _different_values_at_one_bus(gen_bus, gen_vm):\r\n raise UserWarning(\"Generators with different voltage setpoints connected to the same bus\")\r\n\r\ndef _check_voltage_angles_at_same_bus(net, ppc):\r\n gen_va = net.ext_grid.va_degree[net._is_elements[\"ext_grid\"]].values\r\n eg_gens = net._pd2ppc_lookups[\"ext_grid\"][net.ext_grid.index[net._is_elements[\"ext_grid\"]]]\r\n gen_bus = ppc[\"gen\"][eg_gens, GEN_BUS].astype(int)\r\n if _different_values_at_one_bus(gen_bus, gen_va):\r\n raise UserWarning(\"Ext grids with different voltage angle setpoints connected to the same bus\")\r\n\r\n\r\ndef _different_values_at_one_bus(buses, values):\r\n \"\"\"\r\n checks if there are different values in any of the\r\n\r\n \"\"\"\r\n # buses with one or more generators and their index\r\n unique_bus, index_first_bus = np.unique(buses, return_index=True)\r\n\r\n # voltage setpoint lookup with the voltage of the first occurence of that bus\r\n first_values = -np.ones(buses.max() + 1)\r\n first_values[unique_bus] = values[index_first_bus]\r\n\r\n # generate voltage 
setpoints where all generators at the same bus\r\n # have the voltage of the first generator at that bus\r\n values_equal = first_values[buses]\r\n\r\n return not np.array_equal(values, values_equal)\r\n", "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport numpy as np\nimport pytest\n\nimport pandapower as pp\nfrom pandapower.test.loadflow.result_test_network_generator import result_test_network_generator\n\n\[email protected](scope=\"session\")\ndef simple_network():\n net = pp.create_empty_network()\n b1 = pp.create_bus(net, name=\"bus1\", vn_kv=10.)\n pp.create_ext_grid(net, b1)\n b2 = pp.create_bus(net, name=\"bus2\", geodata=(1, 2))\n b3 = pp.create_bus(net, name=\"bus3\", geodata=(1, 3))\n b4 = pp.create_bus(net, name=\"bus4\", vn_kv=10.)\n pp.create_transformer(net, b4, b2,\n std_type=\"0.25 MVA 10/0.4 kV\",\n name=None, in_service=True, index=None)\n pp.create_line(net, b2, b3, 1, name=\"line1\",\n std_type=\"NAYY 4x150 SE\",\n geodata=np.array([[1, 2], [3, 4]]))\n pp.create_line(net, b1, b4, 1, name=\"line2\",\n std_type=\"NAYY 4x150 SE\")\n pp.create_load(net, b2, p_kw=10, q_kvar=0, name=\"load1\")\n pp.create_load(net, b3, p_kw=40, q_kvar=2, name=\"load2\")\n pp.create_gen(net, 3, p_kw=-200., vm_pu=1.0)\n pp.create_sgen(net, 2, p_kw=-50, sn_kva=100)\n return net\n\n\n\[email protected](scope=\"session\")\ndef result_test_network():\n for net in result_test_network_generator():\n pass\n pp.runpp(net, trafo_model=\"t\", trafo_loading=\"current\")\n return net\n\nif __name__ == '__main__':\n net = result_test_network()\n # pp.rundcpp(net)" ]
[ [ "numpy.array_equal", "numpy.unique", "numpy.isnan", "pandas.DataFrame", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
miquelmn/nn_interpretability
[ "2b5d2b4102016189743e09f1f3a56f2ecddfde98", "2b5d2b4102016189743e09f1f3a56f2ecddfde98" ]
[ "nn_interpretability/model/model_repository.py", "nn_interpretability/interpretation/lrp/lrp_eps.py" ]
[ "import os\nimport torch\nfrom pathlib import Path\n\nfrom nn_interpretability.model.definition.am_mnist_classifier import AMCNN\nfrom nn_interpretability.model.definition.mc_dropout_cnn import CNN_Dropout\nfrom nn_interpretability.model.definition.general_mnist_cnn import GeneralCNN\nfrom nn_interpretability.model.definition.mnist_generator import MNISTGenerator\nfrom nn_interpretability.model.definition.mnist_discriminator import MNISTDiscriminator\nfrom nn_interpretability.model.definition.cam_mnist_classifier import CAMMNISTClassifier\nfrom nn_interpretability.model.definition.pretrained_dc_generator import PretrainedDCGANGenerator\nfrom nn_interpretability.model.definition.cam_mnist_classifier_2 import CAMMNISTExtendedClassifier\n\n\nclass ModelRepository:\n MODELS_PATH = str(Path(__file__).parent.parent.parent.joinpath('models')) + \"/\"\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n @staticmethod\n def get_general_mnist_cnn(path: str = None):\n model = GeneralCNN()\n\n if path is not None:\n if os.path.exists(ModelRepository.MODELS_PATH + path):\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_cnn_dropout(path: str = None):\n model = CNN_Dropout()\n\n if path is not None:\n if os.path.exists(ModelRepository.MODELS_PATH + path):\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n \n @staticmethod\n def get_cam_classifier(path: str = None):\n model = CAMMNISTClassifier()\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_cam_extended_classifier(path: str = None):\n model = CAMMNISTExtendedClassifier()\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_am_classifier(path: str = None):\n model = AMCNN()\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_pretrained_dcgan_generator():\n \"\"\"\n Source of the pretrained model is:\n\n https://github.com/csinva/gan-vae-pretrained-pytorch\n :return:\n \"\"\"\n path = 'pretrained_dcgan_generator.pth'\n\n model = PretrainedDCGANGenerator()\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_mnist_generator(latent_dim: int = 128, path: str = None):\n model = MNISTGenerator(latent_dim=latent_dim)\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_mnist_discriminator(path: str = None):\n model = MNISTDiscriminator()\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def save(model, model_name):\n torch.save(model.state_dict(), ModelRepository.MODELS_PATH + model_name)\n return model\n\n @staticmethod\n def _load(model, model_name):\n model.load_state_dict(torch.load(ModelRepository.MODELS_PATH + model_name, map_location=ModelRepository.device))\n return model.to(ModelRepository.device)\n", "import torch\nimport copy\nimport torch.nn as nn\nimport numpy as np\n\nfrom torch.nn import Module\nfrom torchvision.transforms import transforms\nfrom nn_interpretability.interpretation.lrp.lrp_base import LRPBase\n\n\nclass LRPEpsilon(LRPBase):\n \"\"\"\n Implements the decision-based interpretability method 
\"LRP-epsilon\"\n as outlined in the paper \"Layer-Wise Relevance Propagation: An Overview\"\n by Montavon et al.\n\n http://iphome.hhi.de/samek/pdf/MonXAI19.pdf\n \"\"\" \n def __init__(self, model: Module, classes: [str], preprocess: transforms.Compose, \n visualize_layer=0, input_z_beta=True, treat_avgpool=False):\n \"\"\"\n :param model: The model the decisions of which needs to be interpreted.\n :param classes: A collection of all classes that the given model can classify.\n :param preprocess: The preprocessing functions that need to be invoked for the model input.\n :param visualize_layer: Select the layer we want to visualize heatmap.\n :param input_z_beta: Switch for using LRP z^beta rule at input layers.\n :param treat_avgpool: Switch for treat max pooling like average pooling described in Montavon's paper.\n \"\"\" \n super(LRPEpsilon, self).__init__(model, classes, preprocess)\n self.visualize_layer = visualize_layer\n self.input_z_beta = input_z_beta\n self.treat_avgpool = treat_avgpool\n\n def interpret(self, x):\n # Create a list to collect all layers\n x = x.detach().to(self.device)\n self.A = [x]\n layers = self.to_conv(x)\n L = len(layers)\n for l in range(L):\n self.A.append(layers[l].forward(self.A[l]))\n \n _, self.predicted_class = torch.max(self.A[-1], dim=1)\n if self.classes == 'predicted':\n self.classes = self.predicted_class.item()\n \n # Relevance score\n num_cls = self.A[-1].size(1)\n T = torch.FloatTensor((1.0 * (np.arange(num_cls) == self.classes).reshape([1,num_cls,1,1])))\n self.Relevance = [None] * L + [self.A[-1].detach() * T.to(self.device)]\n \n # Uncomment this and comment \"self.LRP_layers = layers\" if you have enough GPU memory. \n # This is to make sure that when we treat max pooling as average pooling we don't change the original layer\n #self.LRP_layers = copy.deepcopy(layers) \n self.LRP_layers = layers\n \n # LRP backward passincr_p\n for l in range(self.visualize_layer, L)[::-1]:\n self.A[l] = (self.A[l].detach().to(self.device)).requires_grad_(True)\n incr = lambda z: z + 1e-9 + 0.1 * ((z**2).mean()**.5).detach()\n\n if isinstance(self.LRP_layers[l],torch.nn.MaxPool2d) or \\\n isinstance(self.LRP_layers[l],torch.nn.AdaptiveAvgPool2d):\n\n if self.treat_avgpool:\n #treat max pooling like average pooling described in Montavon's paper\n self.LRP_layers[l] = torch.nn.AvgPool2d(2)\n \n z = incr(self.LRP_layers[l].forward(self.A[l])) # step 1 \n s = (self.Relevance[l+1] / z).detach() # step 2 \n (z * s).sum().backward(); c = self.A[l].grad # step 3\n self.Relevance[l] = (self.A[l] * c).detach() # step 4 \n\n elif isinstance(self.LRP_layers[l],torch.nn.Conv2d):\n\n z = incr(self.LRP_layers[l].forward(self.A[l])) # step 1\n s = (self.Relevance[l+1]/z).detach() # step 2\n (z * s).sum().backward(); c = self.A[l].grad # step 3\n self.Relevance[l] = (self.A[l] * c).detach() # step 4\n\n else:\n self.Relevance[l] = self.Relevance[l+1]\n \n if (self.input_z_beta == True) and (self.visualize_layer == 0): self.lrp_pixel()\n\n return self.Relevance[self.visualize_layer]\n\n\n\n\n" ]
[ [ "torch.cuda.is_available", "torch.load" ], [ "numpy.arange", "torch.nn.AvgPool2d", "torch.max" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tuahk/NiaPy
[ "c863d801fda8e1949a3ca716a4de7c7ca3d0ea16", "c863d801fda8e1949a3ca716a4de7c7ca3d0ea16", "c863d801fda8e1949a3ca716a4de7c7ca3d0ea16" ]
[ "NiaPy/algorithms/basic/gso.py", "NiaPy/algorithms/basic/ba.py", "NiaPy/algorithms/basic/gsa.py" ]
[ "# encoding=utf8\n# pylint: disable=mixed-indentation, trailing-whitespace, line-too-long, multiple-statements, attribute-defined-outside-init, logging-not-lazy, no-self-use, redefined-builtin, singleton-comparison, unused-argument, arguments-differ, no-else-return\nimport logging\nfrom scipy.spatial.distance import euclidean\nfrom numpy import full, apply_along_axis, argmin, copy, sum, inf, fmax, pi, where\nfrom NiaPy.algorithms.algorithm import Algorithm\n\nlogging.basicConfig()\nlogger = logging.getLogger('NiaPy.algorithms.basic')\nlogger.setLevel('INFO')\n\n__all__ = ['GlowwormSwarmOptimization', 'GlowwormSwarmOptimizationV1', 'GlowwormSwarmOptimizationV2', 'GlowwormSwarmOptimizationV3']\n\nclass GlowwormSwarmOptimization(Algorithm):\n\tr\"\"\"Implementation of glowwarm swarm optimization.\n\n\t**Algorithm:** Glowwarm Swarm Optimization Algorithm\n\n\t**Date:** 2018\n\n\t**Authors:** Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference URL:** https://www.springer.com/gp/book/9783319515946\n\n\t**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.\n\t\"\"\"\n\tdef __init__(self, **kwargs):\n\t\tif kwargs.get('name', None) == None: Algorithm.__init__(self, name='GlowwormSwarmOptimization', sName='GSO', **kwargs)\n\t\telse: Algorithm.__init__(self, **kwargs)\n\n\tdef setParameters(self, n=25, l0=5, nt=5, rho=0.4, gamma=0.6, beta=0.08, s=0.03, **ukwargs):\n\t\tr\"\"\"Set the arguments of an algorithm.\n\n\t\t**Arguments:**\n\n\t\tn {integer} -- number of glowworms in population\n\n\t\tl0 {real} -- initial luciferin quantity for each glowworm\n\n\t\tnt {real} --\n\n\t\trs {real} -- maximum sensing range\n\n\t\trho {real} -- luciferin decay constant\n\n\t\tgamma {real} -- luciferin enhancement constant\n\n\t\tbeta {real} --\n\n\t\ts {real} --\n\t\t\"\"\"\n\t\tself.n, self.l0, self.nt, self.rho, self.gamma, self.beta, self.s = n, l0, nt, rho, gamma, beta, s\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef randMove(self, i):\n\t\tj = i\n\t\twhile i == j: j = self.randint(self.n)\n\t\treturn j\n\n\tdef getNeighbors(self, i, r, GS, L):\n\t\tN = full(self.n, 0)\n\t\tfor j, gw in enumerate(GS): N[j] = 1 if i != j and euclidean(GS[i], gw) <= r and L[i] >= L[j] else 0\n\t\treturn N\n\n\tdef probabilityes(self, i, N, L):\n\t\td, P = sum(L[where(N == 1)] - L[i]), full(self.n, .0)\n\t\tfor j in range(self.n): P[i] = ((L[j] - L[i]) / d) if N[j] == 1 else 0\n\t\treturn P\n\n\tdef moveSelect(self, pb, i):\n\t\tr, b_l, b_u = self.rand(), 0, 0\n\t\tfor j in range(self.n):\n\t\t\tb_l, b_u = b_u, b_u + pb[i]\n\t\t\tif b_l < r < b_u: return j\n\t\treturn self.randint(self.n)\n\n\tdef calcLuciferin(self, L, GS_f): return (1 - self.rho) * L + self.gamma * GS_f\n\n\tdef rangeUpdate(self, R, N, rs): return R + self.beta * (self.nt - sum(N))\n\n\tdef getBest(self, GS, GS_f, xb, xb_f):\n\t\tib = argmin(GS_f)\n\t\tif GS_f[ib] < xb_f: return GS[ib], GS_f[ib]\n\t\telse: return xb, xb_f\n\n\tdef runTask(self, task):\n\t\trs = euclidean(full(task.D, 0), task.bRange)\n\t\tGS, GS_f, L, R = self.uniform(task.Lower, task.Upper, [self.n, task.D]), full(self.n, inf), full(self.n, self.l0), full(self.n, rs)\n\t\txb, xb_f = None, inf\n\t\twhile not task.stopCondI():\n\t\t\tGSo, Ro, GS_f = copy(GS), copy(R), apply_along_axis(task.eval, 1, GS)\n\t\t\txb, xb_f = self.getBest(GS, GS_f, xb, xb_f)\n\t\t\tL = self.calcLuciferin(L, GS_f)\n\t\t\tN = [self.getNeighbors(i, Ro[i], GSo, L) for i in 
range(self.n)]\n\t\t\tP = [self.probabilityes(i, N[i], L) for i in range(self.n)]\n\t\t\tj = [self.moveSelect(P[i], i) for i in range(self.n)]\n\t\t\tfor i in range(self.n): GS[i] = task.repair(GSo[i] + self.s * ((GSo[j[i]] - GSo[i]) / (euclidean(GSo[j[i]], GSo[i]) + 1e-31)))\n\t\t\tfor i in range(self.n): R[i] = max(0, min(rs, self.rangeUpdate(Ro[i], N[i], rs)))\n\t\treturn xb, xb_f\n\nclass GlowwormSwarmOptimizationV1(GlowwormSwarmOptimization):\n\tr\"\"\"Implementation of glowwarm swarm optimization.\n\n\t**Algorithm:** Glowwarm Swarm Optimization Algorithm\n\n\t**Date:** 2018\n\n\t**Authors:** Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference URL:** https://www.springer.com/gp/book/9783319515946\n\n\t**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.\n\t\"\"\"\n\tdef __init__(self, **kwargs): GlowwormSwarmOptimization.__init__(self, name='GlowwormSwarmOptimizationV1', sName='GSOv1', **kwargs)\n\n\tdef setParameters(self, **kwargs):\n\t\tself.__setParams(**kwargs)\n\t\tGlowwormSwarmOptimization.setParameters(self, **kwargs)\n\n\tdef __setParams(self, alpha=0.2, **ukwargs):\n\t\tr\"\"\"Set the arguments of an algorithm.\n\n\t\t**Arguments:**\n\n\t\talpha {real} --\n\t\t\"\"\"\n\t\tself.alpha = alpha\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef calcLuciferin(self, L, GS_f): return fmax(0, (1 - self.rho) * L + self.gamma * GS_f)\n\n\tdef rangeUpdate(self, R, N, rs): return rs / (1 + self.beta * (sum(N) / (pi * rs ** 2)))\n\nclass GlowwormSwarmOptimizationV2(GlowwormSwarmOptimization):\n\tr\"\"\"Implementation of glowwarm swarm optimization.\n\n\t**Algorithm:** Glowwarm Swarm Optimization Algorithm\n\n\t**Date:** 2018\n\n\t**Authors:** Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference URL:** https://www.springer.com/gp/book/9783319515946\n\n\t**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.\n\t\"\"\"\n\tdef __init__(self, **kwargs): GlowwormSwarmOptimization.__init__(self, name='GlowwormSwarmOptimizationV2', sName='GSOv2', **kwargs)\n\n\tdef setParameters(self, **kwargs):\n\t\tself.__setParams(alpha=kwargs.pop('alpha', 0.2), **kwargs)\n\t\tGlowwormSwarmOptimization.setParameters(self, **kwargs)\n\n\tdef __setParams(self, alpha=0.2, **ukwargs):\n\t\tr\"\"\"Set the arguments of an algorithm.\n\n\t\t**Arguments:**\n\n\t\tbeta1 {real} --\n\n\t\ts {real} --\n\t\t\"\"\"\n\t\tself.alpha = alpha\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef rangeUpdate(self, P, N, rs): return self.alpha + (rs - self.alpha) / (1 + self.beta * sum(N))\n\nclass GlowwormSwarmOptimizationV3(GlowwormSwarmOptimization):\n\tr\"\"\"Implementation of glowwarm swarm optimization.\n\n\t**Algorithm:** Glowwarm Swarm Optimization Algorithm\n\n\t**Date:** 2018\n\n\t**Authors:** Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference URL:** https://www.springer.com/gp/book/9783319515946\n\n\t**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. 
Springer, 2017.\n\t\"\"\"\n\tdef __init__(self, **kwargs): GlowwormSwarmOptimization.__init__(self, name='GlowwormSwarmOptimizationV2', sName='GSOv2', **kwargs)\n\n\tdef setParameters(self, **kwargs):\n\t\tself.__setParams(beta1=kwargs.pop('beta1', 0.2), **kwargs)\n\t\tGlowwormSwarmOptimization.setParameters(self, **kwargs)\n\n\tdef __setParams(self, beta1=0.2, **ukwargs):\n\t\tr\"\"\"Set the arguments of an algorithm.\n\n\t\t**Arguments:**\n\n\t\tbeta1 {real} --\n\n\t\ts {real} --\n\t\t\"\"\"\n\t\tself.beta1 = beta1\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef rangeUpdate(self, R, N, rs): return R + (self.beta * sum(N)) if sum(N) < self.nt else (-self.beta1 * sum(N))\n\n# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3\n", "# encoding=utf8\n# pylint: disable=mixed-indentation, multiple-statements, attribute-defined-outside-init, logging-not-lazy, no-self-use, line-too-long, singleton-comparison, arguments-differ\nimport logging\nfrom numpy import full, apply_along_axis, argmin\nfrom NiaPy.algorithms.algorithm import Algorithm\n\nlogging.basicConfig()\nlogger = logging.getLogger('NiaPy.algorithms.basic')\nlogger.setLevel('INFO')\n\n__all__ = ['BatAlgorithm']\n\nclass BatAlgorithm(Algorithm):\n\tr\"\"\"Implementation of Bat algorithm.\n\n\t**Algorithm:** Bat algorithm\n\n\t**Date:** 2015\n\n\t**Authors:** Iztok Fister Jr., Marko Burjek and Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference paper:**\n\tYang, Xin-She. \"A new metaheuristic bat-inspired algorithm.\"\n\tNature inspired cooperative strategies for optimization (NICSO 2010).\n\tSpringer, Berlin, Heidelberg, 2010. 65-74.\n\t\"\"\"\n\tdef __init__(self, **kwargs):\n\t\tr\"\"\"**__init__(self, D, NP, nFES, A, r, Qmin, Qmax, benchmark)**.\n\n\t\t**See**:\n\t\tAlgorithm.__init__(self, **kwargs)\n\t\t\"\"\"\n\t\tif kwargs.get('name', None) == None: Algorithm.__init__(self, name=kwargs.get('name', 'BatAlgorithm'), sName=kwargs.get('sName', 'BA'), **kwargs)\n\t\telse: Algorithm.__init__(self, **kwargs)\n\n\tdef setParameters(self, NP, A, r, Qmin, Qmax, **ukwargs):\n\t\tr\"\"\"Set the parameters of the algorithm.\n\n\t\t**Arguments:**\n\n\t\tNP {integer} -- population size\n\n\t\tA {decimal} -- loudness\n\n\t\tr {decimal} -- pulse rate\n\n\t\tQmin {decimal} -- minimum frequency\n\n\t\tQmax {decimal} -- maximum frequency\n\t\t\"\"\"\n\t\tself.NP, self.A, self.r, self.Qmin, self.Qmax = NP, A, r, Qmin, Qmax\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef runTask(self, task):\n\t\tr\"\"\"Run algorithm with initialized parameters.\n\n\t\t**Return:**\n\n\t\t{decimal} -- coordinates of minimal found objective function\n\n\t\t{decimal} -- minimal value found of objective function\n\t\t\"\"\"\n\t\tS, Q, v = full([self.NP, task.D], 0.0), full(self.NP, 0.0), full([self.NP, task.D], 0.0)\n\t\tSol = task.Lower + task.bRange * self.uniform(0, 1, [self.NP, task.D])\n\t\tFitness = apply_along_axis(task.eval, 1, Sol)\n\t\tj = argmin(Fitness)\n\t\tbest, f_min = Sol[j], Fitness[j]\n\t\twhile not task.stopCond():\n\t\t\tfor i in range(self.NP):\n\t\t\t\tQ[i] = self.Qmin + (self.Qmax - self.Qmin) * self.uniform(0, 1)\n\t\t\t\tv[i] = v[i] + (Sol[i] - best) * Q[i]\n\t\t\t\tS[i] = Sol[i] + v[i]\n\t\t\t\tS[i] = task.repair(S[i])\n\t\t\t\tif self.rand() > self.r:\n\t\t\t\t\tS[i] = best + 0.001 * self.normal(0, 1, task.D)\n\t\t\t\t\tS[i] = task.repair(S[i])\n\t\t\t\tFnew = task.eval(S[i])\n\t\t\t\tif (Fnew <= Fitness[i]) and (self.rand() < self.A): Sol[i], Fitness[i] = S[i], Fnew\n\t\t\t\tif Fnew <= 
f_min: best, f_min = S[i], Fnew\n\t\treturn best, f_min\n\n# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3\n", "# encoding=utf8\n# pylint: disable=mixed-indentation, multiple-statements, line-too-long, unused-argument, no-self-use, no-self-use, attribute-defined-outside-init, logging-not-lazy, len-as-condition, singleton-comparison, arguments-differ, redefined-builtin\nimport logging\nfrom numpy import apply_along_axis, asarray, inf, argmin, argmax, sum, full\nfrom NiaPy.algorithms.algorithm import Algorithm\n\n__all__ = ['GravitationalSearchAlgorithm']\n\nlogging.basicConfig()\nlogger = logging.getLogger('NiaPy.algorithms.basic')\nlogger.setLevel('INFO')\n\nclass GravitationalSearchAlgorithm(Algorithm):\n\tr\"\"\"Implementation of gravitational search algorithm.\n\n\t**Algorithm:** Gravitational Search Algorithm\n\n\t**Date:** 2018\n\n\t**Author:** Klemen Berkoivč\n\n\t**License:** MIT\n\n\t**Reference URL:** https://doi.org/10.1016/j.ins.2009.03.004\n\n\t**Reference paper:** Esmat Rashedi, Hossein Nezamabadi-pour, Saeid Saryazdi, GSA: A Gravitational Search Algorithm, Information Sciences, Volume 179, Issue 13, 2009, Pages 2232-2248, ISSN 0020-0255\n\t\"\"\"\n\tdef __init__(self, **kwargs):\n\t\tif kwargs.get('name', None) == None: Algorithm.__init__(self, name=kwargs.get('name', 'DifferentialEvolutionAlgorithm'), sName=kwargs.get('sName', 'DE'), **kwargs)\n\t\telse: Algorithm.__init__(self, **kwargs)\n\n\tdef setParameters(self, NP=40, G_0=2.467, epsilon=1e-17, **ukwargs):\n\t\tr\"\"\"Set the algorithm parameters.\n\n\t\t**Arguments:**\n\n\t\tNP {integer} -- number of planets in population\n\n\t\tG_0 {real} -- starting gravitational constant\n\t\t\"\"\"\n\t\tself.NP, self.G_0, self.epsilon = NP, G_0, epsilon\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef G(self, t): return self.G_0 / t\n\n\tdef d(self, x, y, ln=2): return sum((x - y) ** ln) ** (1 / ln)\n\n\tdef runTask(self, task):\n\t\tX, v = self.uniform(task.Lower, task.Upper, [self.NP, task.D]), full([self.NP, task.D], 0.0)\n\t\txb, xb_f = None, inf\n\t\twhile not task.stopCondI():\n\t\t\tX_f = apply_along_axis(task.eval, 1, X)\n\t\t\tib, iw = argmin(X_f), argmax(X_f)\n\t\t\tif xb_f > X_f[ib]: xb, xb_f = X[ib], X_f[ib]\n\t\t\tm = (X_f - X_f[iw]) / (X_f[ib] - X_f[iw])\n\t\t\tM = m / sum(m)\n\t\t\tFi = asarray([[self.G(task.Iters) * ((M[i] * M[j]) / (self.d(X[i], X[j]) + self.epsilon)) * (X[j] - X[i]) for j in range(len(M))] for i in range(len(M))])\n\t\t\tF = sum(self.rand([self.NP, task.D]) * Fi, axis=1)\n\t\t\ta = F.T / (M + self.epsilon)\n\t\t\tv = self.rand([self.NP, task.D]) * v + a.T\n\t\t\tX = apply_along_axis(task.repair, 1, X + v)\n\t\treturn xb, xb_f\n\n# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3\n" ]
[ [ "numpy.full", "scipy.spatial.distance.euclidean", "numpy.copy", "numpy.apply_along_axis", "numpy.fmax", "numpy.argmin", "numpy.where", "numpy.sum" ], [ "numpy.apply_along_axis", "numpy.argmin", "numpy.full" ], [ "numpy.full", "numpy.argmax", "numpy.apply_along_axis", "numpy.argmin", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
d-ks/gym_torcs_kai
[ "b9e1659a18ea8a788d0c6aeb7b1111c0284b23ac" ]
[ "gt_kai.py" ]
[ "# Gym-TORCS-Kai Environment for Reinforcement Learning in TORCS\n# original author : Naoto Yoshida\n# (https://github.com/ugo-nama-kun/gym_torcs)\n# modified version author : Daiko Kishikawa\n#\n# This environment is under modification. (2019.12)\n#\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nimport numpy as np\n\nimport sys\n\nsys.path.append(\"./gym_torcs_kai\")\n\nimport snakeoil3_gym as snakeoil3\n\nimport os\nimport time\n\n\nclass TorcsKaiEnv(gym.Env):\n \n # the speed limit starts when the number of steps exceeds this\n terminal_judge_start = 500\n\n # episode terminates when the car is running slower than this limit\n termination_limit_progress = 5\n \n # whether to initialize when resetting the environment\n initial_reset = True\n\n def __init__(self, throttle=False, gear_change=False):\n \n print(\"=== Hello, this is Gym-TORCS-Kai. ===\")\n \n ############################ PARAMETERS OF DRIVING ############################\n \"\"\" throttle (bool) : usage of the throttle control in TORCS. \"\"\"\n \"\"\" gear_change (bool) : usage of the gear control in TORCS. \"\"\"\n \"\"\" obsdim (int) : the number of observation (state input) dimensions.\"\"\"\n # Currently, three types of dimensions are supported: \"2\", \"31\", \"79\".\n # \"2\" : the minimum number of dimensions required for driving.\n # \"31\" : the number of dimensions required for a single agent to drive normally.\n # \"79\" : the number of dimensions using all available inputs.\n \"\"\" maximum_distance (float) : the maximum distance when finish driving. \"\"\"\n \"\"\" default_speed (float) : the target speed for acceleration/deceleration. \"\"\"\n \n self.throttle = throttle\n self.gear_change = gear_change\n \n self.obsdim = 31\n self.maximum_distance = 1908.32\n self.default_speed = 100\n \n ##################################################################################\n \n print(\"--> throttle : \", self.throttle)\n print(\"--> gear : \", self.gear_change)\n print(\"--> dim. of observ. : \", self.obsdim)\n print(\"--> max. dist. : \", self.maximum_distance, \" m\")\n print(\"--> targ. 
speed : \", self.default_speed, \"km/h\")\n \n # Initialization of the driving in TORCS.\n self.initial_run = True\n \n # variable for calculating Y-axis acceleration\n self.speedY = 0\n self.time = 0\n \n # variable for recording the current number of steps\n self.time_step = 0\n \n # the range of reward function\n self.reward_range = (-10, 10)\n\n self.testmode = False\n\n # lists for recording vehicle status\n self.Yaclist = []\n self.poshis = []\n self.anglehis = []\n self.sphis = []\n \n # launch TORCS system\n os.system(\"pkill torcs\")\n time.sleep(0.5)\n\n if self.obsdim == 79:\n os.system(\"torcs &\")\n else:\n os.system(\"torcs -nofuel -nodamage -nolaptime &\")\n time.sleep(0.5)\n os.system(\"sh ./gym_torcs_kai/autostart.sh\")\n time.sleep(0.5)\n\n \"\"\"\n # Modify here if you use multiple tracks in the environment\n self.client = snakeoil3.Client(p=3101, vision=False) # Open new UDP in vtorcs\n self.client.MAX_STEPS = np.inf\n client = self.client\n client.get_servers_input() # Get the initial input from torcs\n obs = client.S.d # Get the current full-observation from torcs\n \"\"\"\n \n # definitions of action space ranges\n if throttle is False:\n self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,))\n else:\n self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))\n \n # definitions of observation space ranges\n if self.obsdim == 79:\n high = np.array([np.pi, # angle\n np.inf, # curLapTime\n np.inf, # damage\n np.inf, # distFromStart\n np.inf, # distRaced\n\n # focus (5 dim.)\n 200, 200, 200, 200, 200,\n\n np.inf, # fuel\n 6, # gear\n np.inf, # lastLapTime\n\n # opponents (36 dim.)\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n\n np.inf, # racePos\n np.inf, # rpm\n np.inf, # speedX\n np.inf, # speedY\n np.inf, # speedZ\n\n # track (19 dim.)\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200,\n\n np.inf, # trackPos\n\n # wheelSpinVel (4 dim.)\n np.inf, np.inf, np.inf, np.inf,\n\n np.inf, # z\n ])\n\n low = np.array([-np.pi, # angle\n 0, # curLapTime\n 0, # damage\n 0, # distFromStart\n 0, # distRaced\n\n # focus (5 dim.)\n 0, 0, 0, 0, 0,\n\n 0, # fuel\n -1, # gear\n 0, # lastLapTime\n\n # opponents (36 dim.)\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n\n 1, # racePos\n 0, # rpm\n -np.inf, # speedX\n -np.inf, # speedY\n -np.inf, # speedZ\n\n # track (19 dim.)\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0,\n\n -np.inf, # trackPos\n\n # wheelSpinVel (4 dim.)\n 0, 0, 0, 0,\n\n -np.inf, # z\n ])\n\n elif self.obsdim == 2:\n high = np.array([np.pi, # angle\n np.inf]) # trackPos\n\n low = np.array([-np.pi, # angle\n -np.inf]) # trackPos\n\n elif self.obsdim == 31:\n\n high = np.array([np.pi, # angle\n 6, # gear\n np.inf, # rpm\n np.inf, # speedX\n np.inf, # speedY\n np.inf, # speedZ\n # track (19 dim.)\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200,\n np.inf, # trackPos\n # wheelSpinVel (4 dim.)\n np.inf, np.inf, np.inf, np.inf,\n np.inf, # z\n ])\n\n low = np.array([-np.pi, # angle\n -1, # gear\n 0, # rpm\n -np.inf, # speedX\n -np.inf, # speedY\n -np.inf, # speedZ\n # track (19 dim.)\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0,\n -np.inf, # trackPos\n # wheelSpinVel (4 dim.)\n 0, 0, 0, 0,\n -np.inf, # z\n 
])\n else:\n low = None\n high = None\n\n self.observation_space = spaces.Box(low=low, high=high)\n\n # For evaluation episodes, set to “test mode” to not display logs.\n def testset(self, test):\n self.testmode = test\n\n # Set learning parameter\n def set_params(self, throttle, gear, dim, max_dist, targ_speed):\n #params: [throttle, gear, dim, max_dist, targ_speed]\n self.throttle = throttle\n self.gear_change = gear\n self.obsdim = dim\n self.maximum_distance = max_dist\n self.default_speed = targ_speed\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n # \"step\" function\n def step(self, u):\n # convert thisAction to the actual torcs actionstr\n client = self.client\n\n this_action = self.agent_to_torcs(u)\n\n # apply actions in TORCS\n action_torcs = client.R.d\n\n # steering control from the agent\n action_torcs[\"steer\"] = this_action[\"steer\"] # in [-1, 1]\n\n # simple automatic throttle control by Snakeoil\n if self.throttle is False:\n target_speed = self.default_speed\n if client.S.d[\"speedX\"] < target_speed - (client.R.d[\"steer\"] * 50):\n if client.R.d[\"accel\"] + 0.1 <= 1:\n client.R.d[\"accel\"] += 0.1\n else:\n if client.R.d[\"accel\"] - 0.1 >= 0:\n client.R.d[\"accel\"] -= 0.1\n\n if client.S.d[\"speedX\"] < 10:\n if (client.S.d[\"speedX\"] + 0.1) != 0:\n client.R.d[\"accel\"] += 1 / (client.S.d[\"speedX\"] + 0.1)\n\n # traction control system\n if (client.S.d[\"wheelSpinVel\"][2] + client.S.d[\"wheelSpinVel\"][3]) - (\n client.S.d[\"wheelSpinVel\"][0] + client.S.d[\"wheelSpinVel\"][1]\n ) > 5:\n action_torcs[\"accel\"] -= 0.2\n else:\n action_torcs[\"accel\"] = this_action[\"accel\"]\n\n # gear control from agent\n if self.gear_change is True:\n action_torcs[\"gear\"] = this_action[\"gear\"]\n else:\n # automatic gear control\n action_torcs[\"gear\"] = 1\n if client.S.d[\"speedX\"] > 50:\n action_torcs[\"gear\"] = 2\n if client.S.d[\"speedX\"] > 80:\n action_torcs[\"gear\"] = 3\n if client.S.d[\"speedX\"] > 110:\n action_torcs[\"gear\"] = 4\n if client.S.d[\"speedX\"] > 140:\n action_torcs[\"gear\"] = 5\n if client.S.d[\"speedX\"] > 170:\n action_torcs[\"gear\"] = 6\n\n # one-step dynamics update #################################\n # apply actions into TORCS\n client.respond_to_server()\n # get the response from TORCS\n client.get_servers_input()\n\n # get the current full-observation from TORCS\n obs = client.S.d\n\n # make an observation from a raw observation vector from TORCS\n self.observation = self.make_observaton(obs)\n \n # calculation of progress\n progress = np.array(obs[\"speedX\"]) * np.cos(obs[\"angle\"])\n\n # Designed Reward Function #######################################\n # This reward function enables agents to learn stable high-speed driving\n # with low Y-axis acceleration.\n # This reward function was designed after trial and error by me.\n\n if (obs[\"curLapTime\"] - self.time) > 0:\n Yac = (obs[\"speedY\"] - self.speedY) / (obs[\"curLapTime\"] - self.time)\n else:\n Yac = 0\n\n self.speedY = obs[\"speedY\"]\n self.time = obs[\"curLapTime\"]\n self.Yaclist.append(Yac)\n\n self.poshis.append(obs[\"trackPos\"])\n self.anglehis.append(obs[\"angle\"])\n self.sphis.append(obs[\"speedX\"])\n\n # reward for the low Y-axis acceleration\n eta_Yac = 1\n r_Yac = 1 / ((Yac / eta_Yac) ** 2 + 1)\n\n # reward for the small angle : 0 ~ 1\n eta_angle = 0.01\n r_angle = 1 / ((obs[\"angle\"] / eta_angle) ** 2 + 1)\n\n # reward for the small position from center : 0 ~ 1\n eta_pos = 0.01\n r_trackPos = 1 / 
((obs[\"trackPos\"] / eta_pos) ** 2 + 1)\n\n # reward for the high X-axis speed : 0 ~ 1\n maxspeed = 100\n if obs[\"speedX\"] >= 0:\n r_speed = min(obs[\"speedX\"] / maxspeed, 1)\n else:\n r_speed = 0\n\n # reward function: -1 ~ 1\n reward = 0.2 * r_angle + 0.2 * r_trackPos + 0.3 * r_speed + 0.3 * r_Yac\n\n Yac_threshold = 3.530394 # 0.1G\n if np.abs(Yac) > Yac_threshold:\n reward = -min(np.abs(Yac) / 250, 1)\n\n # Termination judgement #########################\n track = np.array(obs[\"track\"])\n # episode terminates when the car is out of track\n if track.min() < 0:\n reward = -10\n client.R.d[\"meta\"] = True\n\n # episode terminates if the progress of agent is little\n if self.terminal_judge_start < self.time_step:\n if progress < self.termination_limit_progress:\n reward = -10\n client.R.d[\"meta\"] = True\n\n # episode terminates if the agent runs backward\n if np.cos(obs[\"angle\"]) < 0 or obs[\"distRaced\"] < 0:\n reward = -10\n client.R.d[\"meta\"] = True\n\n # episode terminates when the agent reaches the maximum distance\n if obs[\"distRaced\"] >= self.maximum_distance:\n reward = 10\n client.R.d[\"meta\"] = True\n\n if client.R.d[\"meta\"] is True: # send a reset signal\n poshis = np.array(self.poshis)\n anglehis = np.array(self.anglehis)\n sphis = np.array(self.sphis)\n Yachis = np.array(self.Yaclist)\n \n # For training episodes, display information about the vehicle in the finished driving\n if self.testmode == False:\n print(\"---------------------------------------------------------\")\n print(\"---> raced: \", obs[\"distRaced\"], \" m <---\")\n print(\"--- maxYac: \", np.max(Yachis), \" km/h/s ---\")\n print(\"--- minYac: \", np.min(Yachis), \" km/h/s ---\")\n if abs(np.max(Yachis)) >= abs(np.min(Yachis)):\n absmaxYac = abs(np.max(Yachis))\n else:\n absmaxYac = abs(np.min(Yachis))\n print(\"--- absmaxYac: \", absmaxYac, \" km/h/s ---\")\n print(\"--- meanYac: \", np.mean(Yachis), \" km/h/s +- \", np.std(Yachis), \"---\")\n print(\"--- medianYac: \", np.median(Yachis), \" km/h/s ---\")\n print(\"--- trackPos_mean: \", np.mean(poshis), \" +- \", np.std(poshis), \" ---\")\n print(\"--- angle_mean : \", np.mean(anglehis), \" rad +- \", np.std(anglehis), \" ---\")\n print(\"--- speedX_mean: \", np.mean(sphis), \" km/h +- \", np.std(sphis), \" ---\")\n print(\"---------------------------------------------------------\")\n \n self.initial_run = False\n client.respond_to_server()\n\n self.time_step += 1\n\n return self.get_obs(), reward, client.R.d[\"meta\"], {}\n\n def reset(self, relaunch=False):\n\n self.time_step = 0\n \n # If not true, send a reset signal to TORCS when the reset function is called\n if self.initial_reset is not True:\n self.client.R.d[\"meta\"] = True\n self.client.respond_to_server()\n\n ## TENTATIVE. 
Restarting TORCS for every episode will cause the memory leak bug!\n if relaunch is True:\n self.reset_torcs()\n\n # Modify here if you use multiple tracks in the environment\n # Open new UDP in vtorcs\n self.client = snakeoil3.Client(p=3101, vision=False) \n\n self.client.MAX_STEPS = np.inf\n\n client = self.client\n \n # get the initial input from TORCS\n client.get_servers_input()\n\n # get the current full observation from TORCS\n obs = client.S.d\n self.observation = self.make_observaton(obs)\n\n # reset variables and lists\n self.speedY = obs[\"speedY\"]\n self.time = obs[\"curLapTime\"]\n\n self.Yaclist = []\n self.poshis = []\n self.anglehis = []\n self.sphis = []\n\n self.initial_reset = False\n return self.get_obs()\n\n def close(self):\n os.system(\"pkill torcs\")\n\n def render(self, mode=\"human\"):\n # TORCS has a monitor of driving, so this method omitted.\n pass\n\n ####################################### making observation ############################################\n\n def get_obs(self):\n return self.observation\n\n def reset_torcs(self):\n os.system(\"pkill torcs\")\n time.sleep(0.5)\n\n if self.obsdim == 79:\n os.system(\"torcs &\")\n elif self.obsdim == 2:\n os.system(\"torcs -nofuel -nodamage -nolaptime &\")\n else:\n os.system(\"torcs -nofuel -nodamage -nolaptime &\")\n\n time.sleep(0.5)\n os.system(\"sh ./gym_torcs_kai/autostart.sh\")\n time.sleep(0.5)\n\n def agent_to_torcs(self, u):\n torcs_action = {\"steer\": u[0]}\n\n if self.throttle is True: # throttle action is enabled\n torcs_action.update({\"accel\": u[1]})\n\n if self.gear_change is True: # gear change action is enabled\n torcs_action.update({\"gear\": u[2]})\n\n return torcs_action\n\n def make_observaton(self, raw_obs):\n if self.obsdim == 79:\n obs1 = np.array(\n [\n raw_obs[\"angle\"],\n raw_obs[\"curLapTime\"],\n raw_obs[\"damage\"],\n raw_obs[\"distFromStart\"],\n raw_obs[\"distRaced\"],\n ]\n )\n focus = raw_obs[\"focus\"]\n obs2 = np.array([raw_obs[\"fuel\"], raw_obs[\"gear\"], raw_obs[\"lastLapTime\"]])\n opponents = raw_obs[\"opponents\"]\n obs3 = np.array(\n [\n raw_obs[\"racePos\"],\n raw_obs[\"rpm\"],\n raw_obs[\"speedX\"],\n raw_obs[\"speedY\"],\n raw_obs[\"speedZ\"],\n ]\n )\n track = raw_obs[\"track\"]\n trackPos = np.array([raw_obs[\"trackPos\"]])\n wheelSpinVel = raw_obs[\"wheelSpinVel\"]\n z = np.array(raw_obs[\"z\"])\n observ = np.hstack(\n [obs1, focus, obs2, opponents, obs3, track, trackPos, wheelSpinVel, z]\n )\n return observ\n\n elif self.obsdim == 2:\n return np.array([raw_obs[\"angle\"], raw_obs[\"trackPos\"]])\n\n elif self.obsdim == 31:\n\n obs1 = np.array(\n [\n raw_obs[\"angle\"],\n raw_obs[\"gear\"],\n raw_obs[\"rpm\"],\n raw_obs[\"speedX\"],\n raw_obs[\"speedY\"],\n raw_obs[\"speedZ\"],\n ]\n )\n\n trackPos = np.array([raw_obs[\"trackPos\"]])\n z = np.array(raw_obs[\"z\"])\n\n observ = np.hstack(\n [obs1, raw_obs[\"track\"], trackPos, raw_obs[\"wheelSpinVel\"], z]\n )\n\n return observ\n\n else:\n return None\n" ]
[ [ "numpy.hstack", "numpy.abs", "numpy.min", "numpy.median", "numpy.cos", "numpy.max", "numpy.std", "numpy.mean", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
samgregoost/self_supervised_large
[ "9c0c33cf374a1d5112519939012a64bca98c5f8d" ]
[ "mnist128.py" ]
[ "from __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport TensorflowUtils as utils\nimport read_MITSceneParsingDataParis as scene_parsing\nimport datetime\nimport BatchDatsetReader as dataset\nfrom six.moves import xrange\n\nFLAGS = tf.flags.FLAGS\ntf.flags.DEFINE_integer(\"batch_size\", \"50\", \"batch size for training\")\ntf.flags.DEFINE_string(\"logs_dir\", \"/scratch1/ram095/nips20/logs_mnist128/\", \"path to logs directory\")\ntf.flags.DEFINE_string(\"data_dir\", \"/scratch1/ram095/nips20/paris_street\", \"path to dataset\")\ntf.flags.DEFINE_float(\"learning_rate\", \"1e-4\", \"Learning rate for Adam Optimizer\")\ntf.flags.DEFINE_string(\"model_dir\", \"Model_zoo/\", \"Path to vgg model mat\")\ntf.flags.DEFINE_bool('debug', \"False\", \"Debug mode: True/ False\")\ntf.flags.DEFINE_string('mode', \"train\", \"Mode train/ test/ visualize\")\n\nMODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'\n\nMAX_ITERATION = int(1e5 + 1)\nNUM_OF_CLASSESS = 3\nIMAGE_SIZE = 128\n\n\ndef vgg_net(weights, image):\n layers = (\n 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',\n\n 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',\n\n 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',\n 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',\n\n 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',\n 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',\n\n 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',\n 'relu5_3', 'conv5_4', 'relu5_4'\n )\n\n net = {}\n current = image\n for i, name in enumerate(layers):\n kind = name[:4]\n if kind == 'conv':\n kernels, bias = weights[i][0][0][0][0]\n # matconvnet: weights are [width, height, in_channels, out_channels]\n # tensorflow: weights are [height, width, in_channels, out_channels]\n kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias)\n elif kind == 'relu':\n current = tf.nn.relu(current, name=name)\n if FLAGS.debug:\n utils.add_activation_summary(current)\n elif kind == 'pool':\n current = utils.avg_pool_2x2(current)\n net[name] = current\n\n return net\n\n'''\ndef decoder(image):\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\n\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n\n weights = np.squeeze(model_data['layers'])\n\n processed_image = utils.process_image(image, mean_pixel)\n with tf.variable_scope(\"decoder\"):\n image_net = vgg_net(weights, processed_image)\n conv_final_layer = image_net[\"conv5_3\"]\n\n pool5 = utils.max_pool_2x2(conv_final_layer)\n \n return pool5\n\n\n'''\n \n\n\ndef inference(image, keep_prob,z):\n \"\"\"\n Semantic segmentation network definition\n :param image: input image. 
Should have values in range 0-255\n :param keep_prob:\n :return:\n \"\"\"\n print(\"setting up vgg initialized conv layers ...\")\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\n\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n\n weights = np.squeeze(model_data['layers'])\n\n processed_image = utils.process_image(image, mean_pixel)\n\n with tf.variable_scope(\"inference\"):\n image_net = vgg_net(weights, processed_image)\n conv_final_layer = image_net[\"conv5_3\"]\n\n pool5 = utils.max_pool_2x2(conv_final_layer)\n\n W6 = utils.weight_variable([7, 7, 512, 4096], name=\"W6\")\n b6 = utils.bias_variable([4096], name=\"b6\")\n conv6 = utils.conv2d_basic(pool5, W6, b6)\n relu6 = tf.nn.relu(conv6, name=\"relu6\")\n if FLAGS.debug:\n utils.add_activation_summary(relu6)\n relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)\n\n W7 = utils.weight_variable([1, 1, 4096, 4096], name=\"W7\")\n b7 = utils.bias_variable([4096], name=\"b7\")\n conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)\n relu7 = tf.nn.relu(conv7, name=\"relu7\")\n if FLAGS.debug:\n utils.add_activation_summary(relu7)\n relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)\n\n W8 = utils.weight_variable([1, 1, 4096, 150], name=\"W8\")\n b8 = utils.bias_variable([150], name=\"b8\")\n\t\n # W_h = utils.weight_variable([1, 7, 7, 4], name=\"Wh\")\n conv8 = tf.reshape(utils.conv2d_basic(relu_dropout7, W8, b8),[-1,4*4*150])\n fc1 = tf.reshape(tf.layers.dense(conv8,4*4*150,activation = tf.nn.relu),[-1,4,4,150])\n \n \n\n concat1 = tf.concat([fc1, z],axis = 3)\n # annotation_pred1 = tf.argmax(conv8, dimension=3, name=\"prediction1\")\n print(\"###########################################################\")\n print(fc1)\n # now to upscale to actual image size\n deconv_shape1 = image_net[\"pool4\"].get_shape()\n W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, 278], name=\"W_t1\")\n b_t1 = utils.bias_variable([deconv_shape1[3].value], name=\"b_t1\")\n conv_t1 = utils.conv2d_transpose_strided(concat1, W_t1, b_t1, output_shape=tf.shape(image_net[\"pool4\"]))\n fuse_1 = tf.add(conv_t1, image_net[\"pool4\"], name=\"fuse_1\")\n\n deconv_shape2 = image_net[\"pool3\"].get_shape()\n W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name=\"W_t2\")\n b_t2 = utils.bias_variable([deconv_shape2[3].value], name=\"b_t2\")\n conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net[\"pool3\"]))\n fuse_2 = tf.add(conv_t2, image_net[\"pool3\"], name=\"fuse_2\")\n\n shape = tf.shape(image)\n deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 3])\n W_t3 = utils.weight_variable([16, 16, 3, deconv_shape2[3].value], name=\"W_t3\")\n b_t3 = utils.bias_variable([3], name=\"b_t3\")\n conv_t3 = tf.nn.relu(utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8))\n\n annotation_pred = tf.argmax(conv_t3, dimension=3, name=\"prediction\")\n\n return tf.expand_dims(annotation_pred, dim=3), conv_t3\n\n\ndef train(loss_val, var_list):\n optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n grads = optimizer.compute_gradients(loss_val, var_list=var_list)\n if FLAGS.debug:\n # print(len(var_list))\n for grad, var in grads:\n utils.add_gradient_summary(grad, var)\n return optimizer.apply_gradients(grads)\n\ndef train_z(loss,Z):\n return tf.gradients(ys = loss, xs = Z)\n\n\ndef main(argv=None):\n keep_probability = tf.placeholder(tf.float32, name=\"keep_probabilty\")\n image = 
tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name=\"input_image\")\n annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name=\"annotation\")\n z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name=\"z\")\n\n # pred_annotation, logits = inference(image, keep_probability,z)\n # tf.summary.image(\"input_image\", image, max_outputs=2)\n # tf.summary.image(\"ground_truth\", tf.cast(annotation, tf.uint8), max_outputs=2)\n # tf.summary.image(\"pred_annotation\", tf.cast(pred_annotation, tf.uint8), max_outputs=2)\n# loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,\n # labels=tf.squeeze(annotation, squeeze_dims=[3]),\n # name=\"entropy\")))\n \n \n mask_ = tf.ones([FLAGS.batch_size,64,64,3])\n mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]])\n\n mask2__ = tf.ones([FLAGS.batch_size,78,78,3])\n mask2_ = tf.pad(mask2__, [[0,0],[25,25],[25,25],[0,0]])\n mask2 = mask2_ - mask\n\n pred_annotation, logits = inference((1-mask)*image + mask*255, keep_probability,z)\n\n tf.summary.image(\"input_image\", image, max_outputs=2)\n tf.summary.image(\"ground_truth\", tf.cast(annotation, tf.uint8), max_outputs=2)\n tf.summary.image(\"pred_annotation\", tf.cast(pred_annotation, tf.uint8), max_outputs=2)\n\n # loss0 = tf.reduce_mean(tf.abs(z))\n loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)),[1,2,3])))\n # loss2 = tf.reduce_mean(tf.square((image - logits)*mask2))\n # loss = loss1 + loss2 + loss0\n # loss = tf.reduce_mean(tf.squared_difference(logits ,annotation ))\n loss_summary = tf.summary.scalar(\"entropy\", loss)\n \n grads = train_z(loss,z) \n\n trainable_var = tf.trainable_variables()\n if FLAGS.debug:\n for var in trainable_var:\n utils.add_to_regularization_and_summary(var)\n train_op = train(loss, trainable_var)\n\n print(\"Setting up summary op...\")\n summary_op = tf.summary.merge_all()\n\n print(\"Setting up image reader...\")\n train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir)\n print(len(train_records))\n print(len(valid_records))\n\n print(\"Setting up dataset reader\")\n image_options = {'resize': True, 'resize_size': IMAGE_SIZE}\n if FLAGS.mode == 'train':\n train_dataset_reader = dataset.BatchDatset(train_records, image_options)\n validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)\n\n sess = tf.Session()\n\n print(\"Setting up Saver...\")\n saver = tf.train.Saver()\n\n # create two summary writers to show training loss and validation loss in the same graph\n # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir\n train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)\n validation_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/validation')\n\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"Model restored...\")\n\n if FLAGS.mode == \"train\":\n for itr in xrange(MAX_ITERATION):\n \n train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)\n z_ = np.random.uniform(low=-1.0, high=1.0, size=(FLAGS.batch_size,4,4,128))\n # print(train_images)\n feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85, z: z_}\n #train_images[:,50:100,50:100,:] =0\n v = 0\n \n for p in range(10):\n z_ol = np.copy(z_)\n # print(\"666666666666666666666666666666666666666\")\n z_loss, summ = 
sess.run([loss,loss_summary], feed_dict=feed_dict)\n print(\"Step: %d, z_step: %d, Train_loss:%g\" % (itr,p,z_loss))\n# print(z_) \n g = sess.run([grads],feed_dict=feed_dict)\n v_prev = np.copy(v)\n # print(g[0][0].shape)\n v = 0.001*v - 0.1*g[0][0]\n z_ += 0.001 * v_prev + (1+0.001)*v\n # z_ = np.clip(z_, -1.0, 1.0)\n # print(v.shape)\n # print(z_.shape)\n feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85, z: z_}\n sess.run(train_op, feed_dict=feed_dict)\n\n if itr % 10 == 0:\n train_loss, summary_str = sess.run([loss, loss_summary], feed_dict=feed_dict)\n print(\"Step: %d, Train_loss:%g\" % (itr, train_loss))\n \n train_writer.add_summary(summary_str, itr)\n \n\n if itr % 500 == 0:\n valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)\n \n valid_loss, summary_sva = sess.run([loss, loss_summary], feed_dict={image: valid_images, annotation: valid_annotations,\n keep_probability: 1.0, z: z_})\n print(\"%s ---> Validation_loss: %g\" % (datetime.datetime.now(), valid_loss))\n\n # add validation loss to TensorBoard\n validation_writer.add_summary(summary_sva, itr)\n saver.save(sess, FLAGS.logs_dir + \"model_z_center.ckpt\", 500)\n\n elif FLAGS.mode == \"visualize\":\n valid_images, valid_annotations = validation_dataset_reader.get_random_batch(50)\n z_ = np.random.uniform(low=-1.0, high=1.0, size=(FLAGS.batch_size,4,4,128))\n feed_dict = {image: valid_images, annotation: valid_annotations, keep_probability: 0.85, z: z_}\n v= 0\n for p in range(50):\n z_ol = np.copy(z_)\n # print(\"666666666666666666666666666666666666666\")\n z_loss, summ = sess.run([loss,loss_summary], feed_dict=feed_dict)\n print(\"z_step: %d, Train_loss:%g\" % (p,z_loss))\n# print(z_)\n g = sess.run([grads],feed_dict=feed_dict)\n v_prev = np.copy(v)\n # print(g[0][0].shape)\n v = 0.001*v - 0.1*g[0][0]\n z_ += 0.001 * v_prev + (1+0.001)*v\n # z_ = np.clip(z_, -1.0, 1.0)\n \n pred = sess.run(logits, feed_dict={image: valid_images, annotation: valid_annotations,z:z_,\n keep_probability: 1.0})\n \n\n \n valid_images_masked = (1-sess.run(mask))*valid_images\n predicted_patch = sess.run(mask) * pred\n pred = valid_images_masked + predicted_patch \n # valid_annotations = np.squeeze(valid_annotations, axis=3)\n # pred = np.squeeze(pred, axis=3)\n print(valid_images.shape)\n print(valid_annotations.shape)\n print(pred.shape)\n\n for itr in range(FLAGS.batch_size):\n utils.save_image(valid_images_masked[itr].astype(np.uint8), FLAGS.logs_dir, name=\"inp_\" + str(5+itr))\n utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name=\"gt_\" + str(5+itr))\n utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name=\"predz_\" + str(5+itr))\n print(\"Saved image: %d\" % itr)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.concat", "tensorflow.stack", "numpy.squeeze", "tensorflow.cast", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.flags.DEFINE_float", "tensorflow.pad", "tensorflow.summary.scalar", "tensorflow.summary.image", "tensorflow.gradients", "tensorflow.layers.dense", "numpy.copy", "tensorflow.add", "tensorflow.Session", "tensorflow.square", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.shape", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "numpy.transpose", "tensorflow.flags.DEFINE_bool", "tensorflow.flags.DEFINE_integer", "tensorflow.train.get_checkpoint_state", "tensorflow.nn.relu", "tensorflow.summary.FileWriter", "tensorflow.flags.DEFINE_string", "tensorflow.ones", "tensorflow.expand_dims", "tensorflow.variable_scope", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
sizhky/carla-dataset-runner
[ "a670d981d29de78460cd90b1d4949ee4b71d0ade" ]
[ "HDF5Saver.py" ]
[ "import h5py\nimport numpy as np\n\n\nclass HDF5Saver:\n def __init__(self, sensor_width, sensor_height, file_path_to_save=\"data/carla_dataset.hdf5\"):\n self.sensor_width = sensor_width\n self.sensor_height = sensor_height\n\n self.file = h5py.File(file_path_to_save, \"w\")\n # Creating groups to store each type of data\n self.rgb_group = self.file.create_group(\"rgb\")\n self.depth_group = self.file.create_group(\"depth\")\n self.ego_speed_group = self.file.create_group(\"ego_speed\")\n self.bounding_box_group = self.file.create_group(\"bounding_box\")\n self.bb_vehicles_group = self.bounding_box_group.create_group(\"vehicles\")\n self.bb_walkers_group = self.bounding_box_group.create_group(\"walkers\")\n self.timestamp_group = self.file.create_group(\"timestamps\")\n\n # Storing metadata\n self.file.attrs['sensor_width'] = sensor_width\n self.file.attrs['sensor_height'] = sensor_height\n self.file.attrs['simulation_synchronization_type'] = \"syncd\"\n self.rgb_group.attrs['channels'] = 'R,G,B'\n self.ego_speed_group.attrs['x,y,z_velocity'] = 'in m/s'\n self.bounding_box_group.attrs['data_description'] = 'Each 4 entries in the same row present one individual actor in the scene.'\n self.bounding_box_group.attrs['bbox_format'] = '[xmin, ymin, xmax, ymax] (top left coords; right bottom coords)' \\\n 'the vector has been flattened; therefore the data must' \\\n 'be captured in blocks of 4 elements'\n self.timestamp_group.attrs['time_format'] = \"current time in MILISSECONDS since the unix epoch \" \\\n \"(time.time()*1000 in python3)\"\n\n def record_data(self, rgb_array, depth_array, bounding_box, ego_speed, timestamp):\n timestamp = str(timestamp)\n self.rgb_group.create_dataset(timestamp, data=rgb_array)\n self.depth_group.create_dataset(timestamp, data=depth_array)\n self.ego_speed_group.create_dataset(timestamp, data=ego_speed)\n self.bb_vehicles_group.create_dataset(timestamp, data=bounding_box[0])\n self.bb_walkers_group.create_dataset(timestamp, data=bounding_box[1])\n\n def record_all_timestamps(self, timestamps_list):\n self.timestamp_group.create_dataset(\"timestamps\", data=np.array(timestamps_list))\n\n def close_HDF5(self):\n self.file.close()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VinhLoiIT/ignite
[ "3b2b9655ea9f80ce49b8a9f1c2d72f80e2a95f56", "a22a0f5e909ac70d2a1f76a60b6e84b2134f196c" ]
[ "ignite/contrib/engines/common.py", "ignite/contrib/metrics/regression/median_absolute_error.py" ]
[ "from functools import partial\nimport warnings\nimport numbers\n\nfrom collections.abc import Sequence, Mapping\n\nimport torch\nimport torch.distributed as dist\n\nfrom ignite.engine import Engine, Events\nfrom ignite.metrics import RunningAverage\nfrom ignite.handlers import TerminateOnNan, ModelCheckpoint, EarlyStopping\nfrom ignite.contrib.metrics import GpuInfo\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.contrib.handlers import VisdomLogger\nfrom ignite.contrib.handlers import TensorboardLogger, global_step_from_engine\nimport ignite.contrib.handlers.tensorboard_logger as tb_logger_module\nimport ignite.contrib.handlers.visdom_logger as visdom_logger_module\nfrom ignite.contrib.handlers import MLflowLogger\nimport ignite.contrib.handlers.mlflow_logger as mlflow_logger_module\nfrom ignite.contrib.handlers import PolyaxonLogger\nimport ignite.contrib.handlers.polyaxon_logger as polyaxon_logger_module\n\n\ndef setup_common_training_handlers(\n trainer,\n train_sampler=None,\n to_save=None,\n save_every_iters=1000,\n output_path=None,\n lr_scheduler=None,\n with_gpu_stats=False,\n output_names=None,\n with_pbars=True,\n with_pbar_on_iters=True,\n log_every_iters=100,\n device=\"cuda\",\n):\n \"\"\"Helper method to setup trainer with common handlers (it also supports distributed configuration):\n - :class:`~ignite.handlers.TerminateOnNan`\n - handler to setup learning rate scheduling\n - :class:`~ignite.handlers.ModelCheckpoint`\n - :class:`~ignite.metrics.RunningAverage` on `update_function` output\n - Two progress bars on epochs and optionally on iterations\n\n Args:\n trainer (Engine): trainer engine. Output of trainer's `update_function` should be a dictionary\n or sequence or a single tensor.\n train_sampler (torch.utils.data.DistributedSampler, optional): Optional distributed sampler used to call\n `set_epoch` method on epoch started event.\n to_save (dict, optional): dictionary with objects to save in the checkpoint. This is used with\n :class:`~ignite.handlers.ModelCheckpoint`.\n save_every_iters (int, optional): saving interval. By default, `to_save` objects are stored\n each 1000 iterations.\n output_path (str, optional): output path to indicate where `to_save` objects are stored.\n lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): learning rate scheduler\n as native torch LRScheduler or ignite's parameter scheduler.\n with_gpu_stats (bool, optional): if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the\n trainer. 
This requires `pynvml` package to be installed.\n output_names (list/tuple): list of names associated with `update_function` output dictionary.\n with_pbars (bool, optional): if True, two progress bars on epochs and optionally on iterations are attached\n with_pbar_on_iters (bool, optional): if True, a progress bar on iterations is attached to the trainer.\n log_every_iters (int, optional): logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for\n epoch-wise progress bar.\n device (str of torch.device, optional): Optional device specification in case of distributed computation usage.\n \"\"\"\n kwargs = dict(\n to_save=to_save,\n save_every_iters=save_every_iters,\n output_path=output_path,\n lr_scheduler=lr_scheduler,\n with_gpu_stats=with_gpu_stats,\n output_names=output_names,\n with_pbars=with_pbars,\n with_pbar_on_iters=with_pbar_on_iters,\n log_every_iters=log_every_iters,\n device=device,\n )\n if dist.is_available() and dist.is_initialized():\n _setup_common_distrib_training_handlers(trainer, train_sampler=train_sampler, **kwargs)\n else:\n if train_sampler is not None:\n warnings.warn(\n \"Argument train_sampler distributed sampler used to call `set_epoch` method on epoch \"\n \"started event, but no distributed setting detected\",\n UserWarning,\n )\n _setup_common_training_handlers(trainer, **kwargs)\n\n\nsetup_common_distrib_training_handlers = setup_common_training_handlers\n\n\ndef _setup_common_training_handlers(\n trainer,\n to_save=None,\n save_every_iters=1000,\n output_path=None,\n lr_scheduler=None,\n with_gpu_stats=True,\n output_names=None,\n with_pbars=True,\n with_pbar_on_iters=True,\n log_every_iters=100,\n device=\"cuda\",\n):\n trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())\n\n if lr_scheduler is not None:\n if isinstance(lr_scheduler, torch.optim.lr_scheduler._LRScheduler):\n trainer.add_event_handler(Events.ITERATION_COMPLETED, lambda engine: lr_scheduler.step())\n else:\n trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)\n\n trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)\n\n if to_save is not None:\n if output_path is None:\n raise ValueError(\"If to_save argument is provided then output_path argument should be also defined\")\n checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix=\"training\")\n trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)\n\n if with_gpu_stats:\n GpuInfo().attach(trainer, name=\"gpu\", event_name=Events.ITERATION_COMPLETED(every=log_every_iters))\n\n if output_names is not None:\n\n def output_transform(x, index, name):\n if isinstance(x, Mapping):\n return x[name]\n elif isinstance(x, Sequence):\n return x[index]\n elif isinstance(x, (torch.Tensor, numbers.Number)):\n return x\n else:\n raise ValueError(\n \"Unhandled type of update_function's output. 
\"\n \"It should either mapping or sequence, but given {}\".format(type(x))\n )\n\n for i, n in enumerate(output_names):\n RunningAverage(\n output_transform=partial(output_transform, index=i, name=n), epoch_bound=False, device=device\n ).attach(trainer, n)\n\n if with_pbars:\n if with_pbar_on_iters:\n ProgressBar(persist=False).attach(\n trainer, metric_names=\"all\", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)\n )\n\n ProgressBar(persist=True, bar_format=\"\").attach(\n trainer, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED\n )\n\n\ndef _setup_common_distrib_training_handlers(\n trainer,\n train_sampler=None,\n to_save=None,\n save_every_iters=1000,\n output_path=None,\n lr_scheduler=None,\n with_gpu_stats=True,\n output_names=None,\n with_pbars=True,\n with_pbar_on_iters=True,\n log_every_iters=100,\n device=\"cuda\",\n):\n if not (dist.is_available() and dist.is_initialized()):\n raise RuntimeError(\"Distributed setting is not initialized, please call `dist.init_process_group` before.\")\n\n _setup_common_training_handlers(\n trainer,\n to_save=None,\n lr_scheduler=lr_scheduler,\n with_gpu_stats=with_gpu_stats,\n output_names=output_names,\n with_pbars=(dist.get_rank() == 0) and with_pbars,\n with_pbar_on_iters=with_pbar_on_iters,\n log_every_iters=log_every_iters,\n device=device,\n )\n\n if train_sampler is not None:\n if not callable(getattr(train_sampler, \"set_epoch\", None)):\n raise TypeError(\"Train sampler should have `set_epoch` method\")\n\n @trainer.on(Events.EPOCH_STARTED)\n def distrib_set_epoch(engine):\n train_sampler.set_epoch(engine.state.epoch - 1)\n\n if dist.get_rank() == 0:\n if to_save is not None:\n if output_path is None:\n raise ValueError(\"If to_save argument is provided then output_path argument should be also defined\")\n checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix=\"training\")\n trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)\n\n\ndef empty_cuda_cache(_):\n torch.cuda.empty_cache()\n import gc\n\n gc.collect()\n\n\ndef setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, log_every_iters):\n if optimizers is not None:\n from torch.optim.optimizer import Optimizer\n\n if not isinstance(optimizers, (Optimizer, Mapping)):\n raise TypeError(\"Argument optimizers should be either a single optimizer or a dictionary or optimizers\")\n\n if evaluators is not None:\n if not isinstance(evaluators, (Engine, Mapping)):\n raise TypeError(\"Argument optimizers should be either a single optimizer or a dictionary or optimizers\")\n\n if log_every_iters is None:\n log_every_iters = 1\n\n logger.attach(\n trainer,\n log_handler=logger_module.OutputHandler(tag=\"training\", metric_names=\"all\"),\n event_name=Events.ITERATION_COMPLETED(every=log_every_iters),\n )\n\n if optimizers is not None:\n # Log optimizer parameters\n if isinstance(optimizers, Optimizer):\n optimizers = {None: optimizers}\n\n for k, optimizer in optimizers.items():\n logger.attach(\n trainer,\n log_handler=logger_module.OptimizerParamsHandler(optimizer, param_name=\"lr\", tag=k),\n event_name=Events.ITERATION_STARTED(every=log_every_iters),\n )\n\n if evaluators is not None:\n # Log evaluation metrics\n if isinstance(evaluators, Engine):\n evaluators = {\"validation\": evaluators}\n\n for k, evaluator in evaluators.items():\n gst = global_step_from_engine(trainer)\n logger.attach(\n evaluator,\n log_handler=logger_module.OutputHandler(tag=k, 
metric_names=\"all\", global_step_transform=gst),\n event_name=Events.COMPLETED,\n )\n\n\ndef setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log_every_iters=100):\n \"\"\"Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:\n - Training metrics, e.g. running average loss values\n - Learning rate(s)\n - Evaluation metrics\n\n Args:\n output_path (str): logging directory path\n trainer (Engine): trainer engine\n optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of\n torch optimizers. If a dictionary, keys are used as tags arguments for logging.\n evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,\n keys are used as tags arguments for logging.\n log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,\n value can be set to 1 or None.\n\n Returns:\n TensorboardLogger\n \"\"\"\n tb_logger = TensorboardLogger(log_dir=output_path)\n setup_any_logging(tb_logger, tb_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters)\n return tb_logger\n\n\ndef setup_visdom_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):\n \"\"\"Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:\n - Training metrics, e.g. running average loss values\n - Learning rate(s)\n - Evaluation metrics\n\n Args:\n trainer (Engine): trainer engine\n optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of\n torch optimizers. If a dictionary, keys are used as tags arguments for logging.\n evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,\n keys are used as tags arguments for logging.\n log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,\n value can be set to 1 or None.\n **kwargs: kwargs to pass into VisdomLogger\n\n Returns:\n VisdomLogger\n \"\"\"\n vis_logger = VisdomLogger(**kwargs)\n setup_any_logging(\n vis_logger, visdom_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters\n )\n return vis_logger\n\n\ndef setup_mlflow_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):\n \"\"\"Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:\n - Training metrics, e.g. running average loss values\n - Learning rate(s)\n - Evaluation metrics\n\n Args:\n trainer (Engine): trainer engine\n optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of\n torch optimizers. If a dictionary, keys are used as tags arguments for logging.\n evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,\n keys are used as tags arguments for logging.\n log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,\n value can be set to 1 or None.\n\n Returns:\n MLflowLogger\n \"\"\"\n mlflow_logger = MLflowLogger()\n setup_any_logging(\n mlflow_logger, mlflow_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters\n )\n return mlflow_logger\n\n\ndef setup_plx_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):\n \"\"\"Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:\n - Training metrics, e.g. 
running average loss values\n - Learning rate(s)\n - Evaluation metrics\n\n Args:\n trainer (Engine): trainer engine\n optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of\n torch optimizers. If a dictionary, keys are used as tags arguments for logging.\n evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,\n keys are used as tags arguments for logging.\n log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,\n value can be set to 1 or None.\n\n Returns:\n PolyaxonLogger\n \"\"\"\n plx_logger = PolyaxonLogger()\n setup_any_logging(\n plx_logger, polyaxon_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters\n )\n return plx_logger\n\n\ndef get_default_score_fn(metric_name):\n def wrapper(engine):\n score = engine.state.metrics[metric_name]\n return score\n\n return wrapper\n\n\ndef save_best_model_by_val_score(output_path, evaluator, model, metric_name, n_saved=3, trainer=None, tag=\"val\"):\n \"\"\"Method adds a handler to `evaluator` to save best models based on the score (named by `metric_name`)\n provided by `evaluator`.\n\n Args:\n output_path (str): output path to indicate where to save best models\n evaluator (Engine): evaluation engine used to provide the score\n model (nn.Module): model to store\n metric_name (str): metric name to use for score evaluation. This metric should be present in\n `evaluator.state.metrics`.\n n_saved (int, optional): number of best models to store\n trainer (Engine, optional): trainer engine to fetch the epoch when saving the best model.\n tag (str, optional): score name prefix: `{tag}_{metric_name}`. By default, tag is \"val\".\n\n \"\"\"\n global_step_transform = None\n if trainer is not None:\n global_step_transform = global_step_from_engine(trainer)\n\n best_model_handler = ModelCheckpoint(\n dirname=output_path,\n filename_prefix=\"best\",\n n_saved=n_saved,\n global_step_transform=global_step_transform,\n score_name=\"{}_{}\".format(tag, metric_name.lower()),\n score_function=get_default_score_fn(metric_name),\n )\n evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {\"model\": model,})\n\n\ndef add_early_stopping_by_val_score(patience, evaluator, trainer, metric_name):\n \"\"\"Method setups early stopping handler based on the score (named by `metric_name`) provided by `evaluator`.\n\n Args:\n patience (int): number of events to wait if no improvement and then stop the training.\n evaluator (Engine): evaluation engine used to provide the score\n trainer (Engine): trainer engine to stop the run if no improvement.\n metric_name (str): metric name to use for score evaluation. 
This metric should be present in\n `evaluator.state.metrics`.\n\n \"\"\"\n es_handler = EarlyStopping(patience=patience, score_function=get_default_score_fn(metric_name), trainer=trainer)\n evaluator.add_event_handler(Events.COMPLETED, es_handler)\n", "import torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegressionEpoch\n\n\ndef median_absolute_error_compute_fn(y_pred, y):\n e = torch.abs(y.view_as(y_pred) - y_pred)\n return torch.median(e).item()\n\n\nclass MedianAbsoluteError(_BaseRegressionEpoch):\n r\"\"\"\n Calculates the Median Absolute Error:\n\n :math:`\\text{MdAE} = \\text{MD}_{j=1,n} \\left( |A_j - P_j| \\right)`,\n\n where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in `Botchkarev 2018`__.\n\n - `update` must receive output of the form `(y_pred, y)` or `{'y_pred': y_pred, 'y': y}`.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.\n\n .. warning::\n\n Current implementation stores all input data (output and target) in as tensors before computing a metric.\n This can potentially lead to a memory error if the input data is larger than available RAM.\n\n\n __ https://arxiv.org/abs/1809.03006\n\n \"\"\"\n\n def __init__(self, output_transform=lambda x: x):\n super(MedianAbsoluteError, self).__init__(median_absolute_error_compute_fn, output_transform)\n" ]
[ [ "torch.distributed.get_rank", "torch.distributed.is_available", "torch.distributed.is_initialized", "torch.cuda.empty_cache" ], [ "torch.median" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tarkantemizoz/Cost-Sensitive-Learning
[ "083f8dfd2950b7e3874df34bf61c2ca1e4a91fbb", "083f8dfd2950b7e3874df34bf61c2ca1e4a91fbb" ]
[ "Models/opt_torch.py", "data_formatters/base.py" ]
[ "# coding: utf-8\n# Copyright 2020 Tarkan Temizoz\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom sklearn.utils import shuffle\nfrom Models.linearnet import LinearNet\n\nclass Optimization:\n \"\"\" A helper class to train, test and diagnose Cost-sensitive Logistic Regression\n \n Attributes:\n model: CSLR model.\n optimizer: Optimizer of the network.\n train_return: List of train returns.\n val_return: List of validation returns.\n validation: Whether there is validation data.\n batch_size: Batch-size of the network.\n n_epochs: Total number of epochs.\n n_steps: Number of epochs to evaluate the results\n \"\"\"\n \n def __init__(self, model, optimizer, config):\n \"\"\"Initialises CLSR.\n \n Args:\n model: CSLR model.\n optimizer: Optimizer of the network.\n config: Configuration of the network.\n \"\"\"\n \n self.model = model\n self.optimizer = optimizer\n self.train_return = []\n self.val_return = []\n self.validation = False\n self.batch_size = config.get(\"batch_size\",32)\n self.n_epochs = config.get(\"n_epochs\", 1000)\n self.n_steps = config.get(\"n_steps\", self.n_epochs)\n \n @staticmethod\n def batch(iterable, n):\n \"\"\"Creates batches.\"\"\"\n \n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)] \n \n def train(self, x_train, r_train, x_val=None, r_val=None):\n \"\"\"Applies simple feed-forward network to an input.\n \n Args:\n x_train: train features\n r_train: train returns\n x_val: validation features\n r_val: validation returns\n \"\"\"\n \n if x_val is not None or r_val is not None:\n self.validation = True\n start_time = time.time()\n \n for epoch in range(self.n_epochs):\n x_shuff, r_shuff = shuffle(x_train, r_train)\n self.model.train() \n for j in self.batch(range(0, len(x_shuff)),self.batch_size):\n if len(j) < 2:\n break\n x_batch = x_shuff[j]\n r_batch = r_shuff[j]\n self.optimizer.zero_grad()\n outputs, _, _ = self.model(x_batch)\n loss = -torch.mul(outputs, r_batch).sum() \n loss.backward()\n self.optimizer.step()\n\n returns_train, _, _ = self.evaluate(x_train, r_train)\n self.train_return.append(returns_train)\n if self.validation is True:\n returns_val, _, _ = self.evaluate(x_val, r_val)\n self.val_return.append(returns_val)\n \n if ((epoch+1) % self.n_steps == 0):\n elapsed = time.time() - start_time\n print(\n (\"Epoch %d Train Return: %.3f.\") % (epoch + 1, self.train_return[-1]),\n ((\" Validation Return: %.3f. 
Elapsed time: %.3fs.\")\n % (self.val_return[-1], elapsed)\n if self.validation is True else \n \" Elapsed time: %.3fs.\"\n % elapsed) \n )\n start_time = time.time() \n \n def evaluate(self, x_test, r_test):\n \"\"\"Evaluates simple feed-forward network to an input.\n \n Args:\n x_test: features of the evaluated data\n r_test: returns of the evaluated data\n \n Returns:\n Triple of Tensors for: (Total returns, decision variables, probabilities)\n \"\"\"\n \n with torch.no_grad():\n outputs, probs, _ = self.model(x_test)\n returns = torch.mul(outputs, r_test).sum()\n \n return returns, outputs, probs \n \n def plot_return(self):\n \"\"\"Draws a plot, Trains Returns vs Test Returns\"\"\"\n \n plt.plot(self.train_return, label=\"Train Return\")\n plt.plot(self.val_return, label=\"Test Return\")\n plt.legend()\n plt.title(\"Returns\")\n\n", "# coding: utf-8\n# Copyright 2020 Tarkan Temizoz\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Default data formatting functions for experiments.\nFor new datasets, inherit form GenericDataFormatter and implement\nall abstract functions.\nThese dataset-specific methods:\n1) Perform the necessary input feature engineering & normalisation steps\n2) Are responsible for train, validation and test splits\n\"\"\"\n\nimport os\nimport abc\nimport pickle\n\nfrom sklearn.model_selection import StratifiedKFold\n\nclass GenericDataFormatter(abc.ABC):\n \"\"\"Abstract base class for all data formatters.\n User can implement the abstract methods below to perform dataset-specific\n manipulations.\n \"\"\"\n\n @abc.abstractmethod\n def transform_inputs(self, train, test=None, valid=None):\n \"\"\"Performs feature transformation.\"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def split_data(self):\n \"\"\"Performs the default train and test splits.\"\"\"\n raise NotImplementedError()\n\n def perform_validation(self, n):\n \"\"\"Performs validation sets.\"\"\"\n\n fixed_params = self.get_fixed_params()\n \n self.train = []\n self.valid = []\n self.val_set = [] \n self.train_set = []\n self.n_splits = fixed_params.get(\"n_splits\", 10) \n skf = StratifiedKFold(self.n_splits, shuffle=True, random_state=self.seed)\n \n for train_index, val_index in skf.split(self.x_train, self.y_train):\n \n self.train_set.append(train_index)\n self.val_set.append(val_index)\n self.train.append([self.x_train[train_index],\n self.r_train[train_index],\n self.rmax_train[train_index],\n self.y_train[train_index]]\n )\n self.valid.append([self.x_train[val_index],\n self.r_train[val_index],\n self.rmax_train[val_index],\n self.y_train[val_index]]\n )\n\n pickle.dump(self.train_set,\n open(self.data_path+\"_train_sets.dat\", \"wb\")\n )\n pickle.dump(self.val_set,\n open(self.data_path+\"_valid_sets.dat\", \"wb\")\n )\n \n\n def load_data(self, split=None):\n \"\"\"Returns train, test and validation data for experiments.\"\"\"\n\n train, test, valid = self.train, self.test, self.valid \n fixed_params = self.get_experiment_params()\n \n if fixed_params['validation'] == 
True:\n \n train = train[split]\n valid = valid[split]\n \n if fixed_params['testing'] == False:\n \n test = valid\n valid = []\n \n self.model_path = self.model_path_temp+\"_\"+str(split)\n \n return train, test, valid\n\n def save_models(self, n, expt, simulated_expt):\n \"\"\"Save the data for experiments.\n Args:\n n, expt, simulated_expt =\n # of repeat, name of the experiment, name of the simulated experiments\n \"\"\"\n \n for k in simulated_expt:\n if expt == k:\n data_path = self.data_folder+self.expt_path\n model_path = self.model_folder+self.expt_path\n results_path = self.results_folder+self.expt_path\n break\n else:\n data_path = self.data_folder\n model_path = self.model_folder\n results_path = self.results_folder\n \n self.data_path = data_path+\"_\"+str(n)\n self.model_path = model_path+\"_\"+str(n)\n self.results_path = results_path\n self.model_path_temp = self.model_path\n self.results_path_temp = self.results_path \n \n data = [self.train, self.test] \n pickle.dump(data, open(self.data_path+\".dat\", \"wb\"))\n \n @abc.abstractmethod\n def get_fixed_params(self):\n \"\"\"Defines the fixed parameters used by the model for training.\n Requires the following keys:\n to be defined...\n Returns:\n A dictionary of fixed parameters, e.g.:\n fixed_params = {\n 'n_epochs': 1000,\n 'device': \"cpu\",\n 'num_repeats': 1,\n 'testing': True,\n 'validation': False,\n 'scaler': True\n }\n \"\"\"\n raise NotImplementedError \n\n def get_experiment_params(self):\n \"\"\"Returns fixed model parameters for experiments.\"\"\"\n\n required_keys = [\n 'n_epochs', 'num_repeats', 'device', 'testing', 'validation', 'scaler'\n ]\n\n fixed_params = self.get_fixed_params()\n\n for k in required_keys:\n if k not in fixed_params:\n raise ValueError('Field {}'.format(k) +\n ' missing from fixed parameter definitions!')\n \n if fixed_params['testing'] == False and fixed_params['validation'] == False:\n raise ValueError('Please determine test or validation sets! ') \n \n return fixed_params\n\n @abc.abstractmethod\n def get_tuning_params(self):\n \"\"\"Defines the fixed parameters used by the model for training.\n Requires the following keys:\n to be defined....\n Returns:\n A dictionary of fixed parameters, e.g.:\n fixed_params = {\n 'bayes_trials': 20,\n 'batch_size_bayes': [],\n 'dnn_layers_bayes': [],\n 'inner_cval': True,\n }\n \"\"\"\n raise NotImplementedError \n\n def get_bayes_params(self):\n \"\"\"Returns bayesian optimization parameters for experiments.\"\"\"\n\n required_keys = [\n 'bayes_trials', 'batch_size_bayes', 'dnn_layers_bayes', 'inner_cval'\n ]\n bayes_params = self.get_tuning_params()\n \n if self.bayes == True:\n for k in required_keys:\n if k not in bayes_params:\n raise ValueError('Field {}'.format(k) +\n ' missing from bayes parameter definitions!')\n\n return bayes_params\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "sklearn.utils.shuffle", "matplotlib.pyplot.plot", "torch.mul", "torch.no_grad" ], [ "sklearn.model_selection.StratifiedKFold" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nong-fu/grabcut
[ "19a43eed7597ffae456349e4f0568da2f8f1f25c" ]
[ "grabcut.py" ]
[ "# coding=utf-8\n\nimport sys\nfrom pathlib import Path\nimport webbrowser\n\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\nfrom PyQt5.QtCore import QDir, Qt, pyqtSlot, pyqtSignal\nfrom PyQt5.QtGui import QImage, QPixmap, QColor\nfrom PyQt5.QtWidgets import (\n QApplication, QMainWindow, QWidget,\n QMessageBox, QFileDialog, QLabel, QSpinBox, QPushButton,\n QActionGroup, QAction, QSizePolicy, QHBoxLayout,\n)\n\nfrom ui_grabcut import Ui_MainWindow\n\n\nclass Canvas(QLabel):\n \"\"\"Canvas for drawing mask layer on Image.\n \"\"\"\n\n mousePressed = pyqtSignal()\n mouseMoved = pyqtSignal(int, int, int, int)\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n # self.setStyleSheet(\"border: 1px solid red;\")\n self.last_x, self.last_y = None, None\n\n def mousePressEvent(self, e):\n self.mousePressed.emit()\n\n def mouseMoveEvent(self, e):\n x, y = e.x(), e.y()\n\n if self.last_x is None:\n self.last_x, self.last_y = x, y\n return\n\n self.mouseMoved.emit(self.last_x, self.last_y, x, y)\n self.last_x, self.last_y = x, y\n\n def mouseReleaseEvent(self, e):\n self.last_x, self.last_y = None, None\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n\n # orign image data\n self.img = None\n # mask layer for grabcut\n self.mask = None\n # history masks for undo\n self.masks = []\n # grabcut algorithm param iterCount\n self.iterCount = 5\n\n # canvas image cache\n self.imgWithMask = None\n # mask mode to color, don't use dict, too slow!\n self.mode2color = (\n # cv2.GC_BGD == 0\n np.array([0, 0, 255], dtype=np.uint8),\n # cv2.GC_FGD == 1\n np.array([0, 255, 0], dtype=np.uint8),\n # cv2.GC_PR_BGD == 2\n np.array([0, 0, 120], dtype=np.uint8),\n # cv2.GC_PR_FGD == 3\n np.array([0, 120, 0], dtype=np.uint8),\n )\n # NONE mean none of (BGD/FGD/PR_BGD/PR_FGD)\n self.GC_NONE = 255\n # mask layer alpha\n self.alpha = 0.3\n\n self.imgPath = Path.cwd()\n self.penSize = 40\n\n # init ui order matter\n self.initUI()\n\n def grabCut(self, iterCount):\n if self.img is None:\n self.showMessage(\"No image\")\n return\n\n # avoid grabCut crash\n if not np.any((self.mask == cv2.GC_FGD) | (self.mask == cv2.GC_PR_FGD)):\n self.showMessage(\"no GC_FGD or GC_PR_FGD\")\n return\n\n # before grabcut, save mask to stack\n self.pushMask()\n bgdModel = np.zeros((1, 65), np.float64)\n fgdModel = np.zeros((1, 65), np.float64)\n _ = cv2.grabCut(self.img, self.mask, None, bgdModel,\n fgdModel, iterCount, cv2.GC_INIT_WITH_MASK)\n self.drawPartialImgWithMask(self.masks[-1], self.mask)\n\n # display result\n self.ui.displayResultAction.setChecked(True)\n self.repaint()\n\n def drawingMask(self, x1, y1, x2, y2):\n \"\"\"drawing an small partial of the mask layer,\n which is a small line segment.\n \"\"\"\n if self.img is None:\n return\n # when hidden mask or display result, don't draw mask\n if self.ui.hiddenMaskAction.isChecked() or \\\n self.ui.displayResultAction.isChecked():\n return\n\n if self.ui.prFgdAction.isChecked():\n mode = cv2.GC_PR_FGD\n elif self.ui.prBgdAction.isChecked():\n mode = cv2.GC_PR_BGD\n elif self.ui.fgdAction.isChecked():\n mode = cv2.GC_FGD\n else: # bgdAction\n mode = cv2.GC_BGD\n\n cv2.line(self.mask, (x1, y1), (x2, y2), mode, self.penSize)\n partialMask = np.zeros(self.mask.shape, np.uint8)\n # GC_BGD is 0, can't use 0 as default\n partialMask.fill(self.GC_NONE)\n cv2.line(partialMask, (x1, y1), (x2, y2), mode, self.penSize)\n\n indices = np.where(partialMask != self.GC_NONE)\n if 
indices[0].size == 0:\n # nothing new in partialMask\n return\n self.imgWithMask[indices] = (1 - self.alpha)*self.img[indices] + \\\n self.alpha*self.mode2color[mode]\n\n self.repaint()\n\n def pushMask(self):\n \"\"\"push a mask to history list masks for undo.\n \"\"\"\n # if mask hasn't changed\n if len(self.masks) > 0 and np.array_equal(self.masks[-1], self.mask):\n return\n\n self.masks.append(self.mask.copy())\n\n def drawPartialImgWithMask(self, curMask, newMask):\n \"\"\"draw partial imgWithMask.\n\n mask changed from curMask to newMask, only draw the changed part.\n \"\"\"\n # redraw partial imgWithMask\n indices = np.where(curMask != newMask)\n if indices[0].size == 0:\n # two masks are equal\n return\n self.imgWithMask[indices] = (1-self.alpha)*self.img[indices] + \\\n self.alpha*np.array([self.mode2color[m] for m in newMask[indices]])\n\n def getResult(self):\n \"\"\"use mask cuf off forground area as final result.\n \"\"\"\n result_mask = np.where((self.mask == 2) | (\n self.mask == 0), 0, 1).astype('uint8')\n return self.img*result_mask[:, :, np.newaxis]\n\n @pyqtSlot(name=\"on_displayResultAction_triggered\")\n @pyqtSlot(name=\"on_hiddenMaskAction_triggered\")\n def repaint(self):\n \"\"\"repaint cavans.\n \"\"\"\n if self.img is None:\n self.showMessage(\"No image\")\n return\n\n if self.ui.displayResultAction.isChecked():\n img = self.getResult()\n elif self.ui.hiddenMaskAction.isChecked():\n img = self.img\n else:\n img = self.imgWithMask\n\n # convert opencv image to qt image\n height, width, _ = img.shape\n bytesOfLine = 3*width\n image = QImage(img.tobytes(), width, height,\n bytesOfLine, QImage.Format_RGB888).rgbSwapped()\n self.canvas.setPixmap(QPixmap.fromImage(image))\n\n def initUI(self):\n # merge designer ui\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n # right box on toolbar\n rightBox = QWidget(self.ui.toolBar)\n boxLayout = QHBoxLayout()\n\n # grabcut iterCount spinbox\n boxLayout.addWidget(QLabel(\"iterCount\"))\n self.iterCountSpinBox = QSpinBox(self)\n self.iterCountSpinBox.setRange(1, 100)\n self.iterCountSpinBox.setValue(5)\n boxLayout.addWidget(self.iterCountSpinBox)\n\n boxLayout.addStretch(1)\n\n # pen size spinbox\n boxLayout.addWidget(QLabel(\"pen\"))\n self.penSizeSpinBox = QSpinBox(self)\n self.penSizeSpinBox.setRange(1, 500)\n self.penSizeSpinBox.setSingleStep(5)\n self.penSizeSpinBox.setValue(40)\n boxLayout.addWidget(self.penSizeSpinBox)\n\n rightBox.setLayout(boxLayout)\n self.ui.toolBar.addWidget(rightBox)\n\n self.canvas = Canvas(self)\n self.ui.scrollArea.setWidget(self.canvas)\n # canvas align center in scroll area\n self.ui.scrollArea.setAlignment(Qt.AlignCenter)\n # fixed canvas that make it easier to select mask layer\n self.canvas.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n\n # 4 types of mask layer flags\n actionGroup = QActionGroup(self)\n actionGroup.addAction(self.ui.fgdAction)\n actionGroup.addAction(self.ui.bgdAction)\n actionGroup.addAction(self.ui.prFgdAction)\n actionGroup.addAction(self.ui.prBgdAction)\n\n # handle events\n self.ui.exitAction.triggered.connect(self.close)\n self.penSizeSpinBox.valueChanged.connect(self.setPenSize)\n self.iterCountSpinBox.valueChanged.connect(self.setIterCount)\n\n self.ui.opencvAction.triggered.connect(lambda: webbrowser.open(\n 'https://opencv-python-tutroals.readthedocs.io/en/'\n 'latest/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html'\n ))\n\n self.canvas.mousePressed.connect(self.pushMask)\n self.canvas.mouseMoved.connect(self.drawingMask)\n\n 
self.resetUiToDrawMaskMode()\n\n def resetUiToDrawMaskMode(self):\n \"\"\"reset ui to draw mask mode.\n \"\"\"\n self.ui.prFgdAction.setChecked(True)\n self.ui.displayResultAction.setChecked(False)\n self.ui.hiddenMaskAction.setChecked(False)\n\n def setPenSize(self, v):\n self.penSize = v\n\n def setIterCount(self, v):\n self.iterCount = v\n\n def showMessage(self, msg):\n self.ui.statusbar.showMessage(msg)\n\n @pyqtSlot(name=\"on_openAction_triggered\")\n def openImage(self):\n fileName, _ = QFileDialog.getOpenFileName(\n self, \"Open File\", str(self.imgPath))\n if not fileName:\n return\n\n imgFile = Path(fileName)\n self.imgPath = imgFile.parent\n\n # cv2.imread can't read image that path contain chinese characters,\n # so this is a workaround.\n # self.img = cv2.imread(fileName)\n data = np.fromfile(fileName, dtype=np.uint8)\n self.img = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)\n # discarding alpha channel\n self.img = self.img[:,:,:3]\n self.reset()\n\n @pyqtSlot(name=\"on_saveAction_triggered\")\n def saveResult(self):\n if self.img is None:\n self.showMessage(\"no result to save\")\n return\n\n fileName, _ = QFileDialog.getSaveFileName(\n self, \"Save File\", str(self.imgPath))\n if not fileName:\n return\n\n imgFile = Path(fileName)\n self.imgPath = imgFile.parent\n # default save as png\n if not imgFile.suffix:\n imgFile = imgFile.with_suffix('.png')\n result = self.getResult()\n # cv2.imwrite can't write image that path contain chinese characters.\n im = Image.fromarray(result)\n im.save(imgFile.as_posix())\n\n @pyqtSlot(name=\"on_exportMaskAction_triggered\")\n def exportMask(self):\n if self.mask is None or not self.mask.any():\n self.showMessage(\"no mask\")\n return\n fileName, _ = QFileDialog.getSaveFileName(\n self, \"Save Mask\", str(self.imgPath))\n if not fileName:\n return\n\n imgFile = Path(fileName)\n self.imgPath = imgFile.parent\n # default save as png\n if not imgFile.suffix:\n imgFile = imgFile.with_suffix('.png')\n im = Image.fromarray(self.mask)\n im.save(imgFile.as_posix())\n\n @pyqtSlot(name=\"on_undoAction_triggered\")\n def undo(self):\n if len(self.masks) == 0:\n self.showMessage(\"undo stack is empty\")\n return\n\n prevMask = self.masks.pop()\n self.drawPartialImgWithMask(self.mask, prevMask)\n self.mask = prevMask\n\n # after undo, uncheck display result and hidden mask\n self.resetUiToDrawMaskMode()\n self.repaint()\n\n @pyqtSlot(name=\"on_resetAction_triggered\")\n def reset(self):\n if self.img is None:\n self.showMessage(\"No image\")\n return\n\n self.mask = np.zeros(self.img.shape[:2], np.uint8)\n self.mask.fill(cv2.GC_PR_BGD)\n self.masks = []\n\n # re-create imgWidthMask\n self.imgWithMask = np.zeros(self.img.shape, np.uint8)\n self.imgWithMask[...] = (1-self.alpha)*self.img + \\\n self.alpha*self.mode2color[cv2.GC_PR_BGD]\n\n self.resetUiToDrawMaskMode()\n self.repaint()\n\n @pyqtSlot(name=\"on_grabCutAction_triggered\")\n def runGrabCut(self):\n self.grabCut(self.iterCount)\n\n @pyqtSlot(name=\"on_singleStepAction_triggered\")\n def runGrabCutSingleStep(self):\n self.grabCut(1)\n\n def closeEvent(self, evt):\n # maybe popup a dialog to ask user accept or ignore\n evt.accept()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n app.exec_()\n" ]
[ [ "numpy.fromfile", "numpy.array_equal", "numpy.any", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dongkcs/mindspore
[ "cd7df6dbf463ff3128e9181e9d0c779cecb81320", "cd7df6dbf463ff3128e9181e9d0c779cecb81320", "cd7df6dbf463ff3128e9181e9d0c779cecb81320", "cd7df6dbf463ff3128e9181e9d0c779cecb81320", "cd7df6dbf463ff3128e9181e9d0c779cecb81320", "cd7df6dbf463ff3128e9181e9d0c779cecb81320", "cd7df6dbf463ff3128e9181e9d0c779cecb81320", "59a277756eb4faad9ac9afcc7fd526e8277d4994", "cd7df6dbf463ff3128e9181e9d0c779cecb81320", "cd7df6dbf463ff3128e9181e9d0c779cecb81320", "cd7df6dbf463ff3128e9181e9d0c779cecb81320" ]
[ "model_zoo/official/cv/alexnet/export.py", "mindspore/dataset/text/utils.py", "tests/st/serving/client_example.py", "model_zoo/official/cv/psenet/src/dataset.py", "model_zoo/official/recommend/ncf/src/dataset.py", "model_zoo/official/cv/faster_rcnn/src/FasterRcnn/rpn.py", "mindspore/explainer/benchmark/_attribution/faithfulness.py", "tests/ut/python/dataset/test_cutmix_batch_op.py", "model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py", "mindspore/mindrecord/tools/cifar100_to_mr.py", "model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n##############export checkpoint file into air and onnx models#################\npython export.py\n\"\"\"\nimport argparse\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import Tensor\nfrom mindspore import context\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net, export\n\nfrom src.config import alexnet_cifar10_cfg, alexnet_imagenet_cfg\nfrom src.alexnet import AlexNet\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Classification')\n parser.add_argument('--dataset_name', type=str, default='cifar10', choices=['imagenet', 'cifar10'],\n help='please choose dataset: imagenet or cifar10.')\n parser.add_argument('--device_target', type=str, default=\"Ascend\",\n choices=['Ascend', 'GPU'],\n help='device where the code will be implemented (default: Ascend)')\n parser.add_argument('--ckpt_path', type=str, default=\"./ckpt\", help='if is test, must provide\\\n path where the trained ckpt file')\n args_opt = parser.parse_args()\n context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)\n\n if args_opt.dataset_name == 'cifar10':\n cfg = alexnet_cifar10_cfg\n elif args_opt.dataset_name == 'imagenet':\n cfg = alexnet_imagenet_cfg\n else:\n raise ValueError(\"dataset is not support.\")\n\n net = AlexNet(num_classes=cfg.num_classes)\n\n param_dict = load_checkpoint(args_opt.ckpt_path)\n load_param_into_net(net, param_dict)\n\n input_arr = Tensor(np.random.uniform(0.0, 1.0, size=[1, 3, cfg.image_height, cfg.image_width]), ms.float32)\n export(net, input_arr, file_name=cfg.air_name, file_format=\"AIR\")\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThe module text.utils provides some general methods for NLP text processing.\nFor example, you can use Vocab to build a dictionary,\nuse to_bytes and to_str to encode and decode strings into a specified format.\n\"\"\"\nfrom enum import IntEnum\n\nimport copy\nimport numpy as np\nimport mindspore._c_dataengine as cde\n\nfrom .validators import check_from_file, check_from_list, check_from_dict, check_from_dataset, \\\n check_from_dataset_sentencepiece, check_from_file_sentencepiece, check_save_model\n\n__all__ = [\n \"Vocab\", \"SentencePieceVocab\", \"to_str\", \"to_bytes\"\n]\n\n\nclass Vocab(cde.Vocab):\n \"\"\"\n 
Vocab object that is used to lookup a word.\n\n It contains a map that maps each word(str) to an id (int).\n \"\"\"\n\n @classmethod\n @check_from_dataset\n def from_dataset(cls, dataset, columns=None, freq_range=None, top_k=None, special_tokens=None,\n special_first=True):\n \"\"\"\n Build a vocab from a dataset.\n\n This would collect all unique words in a dataset and return a vocab within\n the frequency range specified by user in freq_range. User would be warned if no words fall into the frequency.\n Words in vocab are ordered from highest frequency to lowest frequency. Words with the same frequency would be\n ordered lexicographically.\n\n Args:\n dataset(Dataset): dataset to build vocab from.\n columns(list[str], optional): column names to get words from. It can be a list of column names.\n (default=None, where all columns will be used. If any column isn't string type, will return error).\n freq_range(tuple, optional): A tuple of integers (min_frequency, max_frequency). Words within the frequency\n range would be kept. 0 <= min_frequency <= max_frequency <= total_words. min_frequency=0 is the same as\n min_frequency=1. max_frequency > total_words is the same as max_frequency = total_words.\n min_frequency/max_frequency can be None, which corresponds to 0/total_words separately\n (default=None, all words are included).\n top_k(int, optional): top_k > 0. Number of words to be built into vocab. top_k most frequent words are\n taken. top_k is taken after freq_range. If not enough top_k, all words will be taken (default=None,\n all words are included).\n special_tokens(list, optional): a list of strings, each one is a special token. for example\n special_tokens=[\"<pad>\",\"<unk>\"] (default=None, no special tokens will be added).\n special_first(bool, optional): whether special_tokens will be prepended/appended to vocab. If special_tokens\n is specified and special_first is set to True, special_tokens will be prepended (default=True).\n\n Returns:\n Vocab, Vocab object built from dataset.\n \"\"\"\n\n vocab = Vocab()\n if columns is None:\n columns = []\n if not isinstance(columns, list):\n columns = [columns]\n if freq_range is None:\n freq_range = (None, None)\n if special_tokens is None:\n special_tokens = []\n root = copy.deepcopy(dataset).build_vocab(vocab, columns, freq_range, top_k, special_tokens, special_first)\n for d in root.create_dict_iterator(num_epochs=1):\n if d is not None:\n raise ValueError(\"from_dataset should receive data other than None.\")\n return vocab\n\n @classmethod\n @check_from_list\n def from_list(cls, word_list, special_tokens=None, special_first=True):\n \"\"\"\n Build a vocab object from a list of word.\n\n Args:\n word_list(list): a list of string where each element is a word of type string.\n special_tokens(list, optional): a list of strings, each one is a special token. 
for example\n special_tokens=[\"<pad>\",\"<unk>\"] (default=None, no special tokens will be added).\n special_first(bool, optional): whether special_tokens will be prepended/appended to vocab, If special_tokens\n is specified and special_first is set to True, special_tokens will be prepended (default=True).\n \"\"\"\n if special_tokens is None:\n special_tokens = []\n return super().from_list(word_list, special_tokens, special_first)\n\n @classmethod\n @check_from_file\n def from_file(cls, file_path, delimiter=\"\", vocab_size=None, special_tokens=None, special_first=True):\n \"\"\"\n Build a vocab object from a list of word.\n\n Args:\n file_path (str): path to the file which contains the vocab list.\n delimiter (str, optional): a delimiter to break up each line in file, the first element is taken to be\n the word (default=\"\").\n vocab_size (int, optional): number of words to read from file_path (default=None, all words are taken).\n special_tokens (list, optional): a list of strings, each one is a special token. for example\n special_tokens=[\"<pad>\",\"<unk>\"] (default=None, no special tokens will be added).\n special_first (bool, optional): whether special_tokens will be prepended/appended to vocab,\n If special_tokens is specified and special_first is set to True,\n special_tokens will be prepended (default=True).\n \"\"\"\n if vocab_size is None:\n vocab_size = -1\n if special_tokens is None:\n special_tokens = []\n return super().from_file(file_path, delimiter, vocab_size, special_tokens, special_first)\n\n @classmethod\n @check_from_dict\n def from_dict(cls, word_dict):\n \"\"\"\n Build a vocab object from a dict.\n\n Args:\n word_dict (dict): dict contains word and id pairs, where word should be str and id be int. id is recommended\n to start from 0 and be continuous. ValueError will be raised if id is negative.\n \"\"\"\n\n return super().from_dict(word_dict)\n\n\nclass SentencePieceVocab(cde.SentencePieceVocab):\n \"\"\"\n SentencePiece obiect that is used to segmentate words\n \"\"\"\n @classmethod\n @check_from_dataset_sentencepiece\n def from_dataset(cls, dataset, col_names, vocab_size, character_coverage, model_type, params):\n \"\"\"\n Build a sentencepiece from a dataset\n\n Args:\n dataset(Dataset): Dataset to build sentencepiece.\n col_names(list): The list of the col name.\n vocab_size(int): Vocabulary size.\n character_coverage(float): Amount of characters covered by the model, good defaults are: 0.9995 for\n languages. with rich character set like Japanese or Chinese and 1.0 for other languages with small\n character set.\n model_type(SentencePieceModel): Choose from unigram (default), bpe, char, or word. 
The input sentence\n must be pretokenized when using word type.\n params(dict): A dictionary with no incoming parameters.\n\n Returns:\n SentencePiece, SentencePiece object from dataset.\n \"\"\"\n\n vocab = SentencePieceVocab()\n root = copy.deepcopy(dataset).build_sentencepiece_vocab(vocab, col_names, vocab_size, character_coverage,\n model_type, params)\n for d in root.create_dict_iterator(num_epochs=1):\n if d is None:\n raise ValueError(\"from_dataset should receive data other than None.\")\n return vocab\n\n @classmethod\n @check_from_file_sentencepiece\n def from_file(cls, file_path, vocab_size, character_coverage, model_type, params):\n \"\"\"\n Build a SentencePiece object from a list of word.\n\n Args:\n file_path(list): Path to the file which contains the sentencepiece list.\n vocab_size(int): Vocabulary size, the type of uint32_t.\n character_coverage(float): Amount of characters covered by the model, good defaults are: 0.9995 for\n languages. with rich character set like Japanse or Chinese and 1.0 for other languages with small\n character set.\n model_type(SentencePieceModel): Choose from unigram (default), bpe, char, or word. The input sentence\n must be pretokenized when using word type.\n params(dict): A dictionary with no incoming parameters(The parameters are derived from SentencePiece\n library).\n\n .. code-block::\n\n input_sentence_size 0\n max_sentencepiece_length 16\n \"\"\"\n return super().from_file(file_path, vocab_size, character_coverage,\n DE_C_INTER_SENTENCEPIECE_MODE[model_type], params)\n\n @classmethod\n @check_save_model\n def save_model(cls, vocab, path, filename):\n \"\"\"\n Save model to filepath\n\n Args:\n vocab(SentencePieceVocab): A sentencepiece object.\n path(str): Path to store model.\n filename(str): The name of the file.\n \"\"\"\n return super().save_model(vocab, path, filename)\n\n\ndef to_str(array, encoding='utf8'):\n \"\"\"\n Convert NumPy array of `bytes` to array of `str` by decoding each element based on charset `encoding`.\n\n Args:\n array (numpy.ndarray): Array of type `bytes` representing strings.\n encoding (str): Indicating the charset for decoding.\n\n Returns:\n numpy.ndarray, NumPy array of `str`.\n \"\"\"\n\n if not isinstance(array, np.ndarray):\n raise ValueError('input should be a NumPy array.')\n\n return np.char.decode(array, encoding)\n\n\ndef to_bytes(array, encoding='utf8'):\n \"\"\"\n Convert NumPy array of `str` to array of `bytes` by encoding each element based on charset `encoding`.\n\n Args:\n array (numpy.ndarray): Array of type `str` representing strings.\n encoding (str): Indicating the charset for encoding.\n\n Returns:\n numpy.ndarray, NumPy array of `bytes`.\n \"\"\"\n\n if not isinstance(array, np.ndarray):\n raise ValueError('input should be a NumPy array.')\n\n return np.char.encode(array, encoding)\n\n\nclass JiebaMode(IntEnum):\n \"\"\"An enumeration for JiebaTokenizer, effective enumeration types are MIX, MP, HMM.\"\"\"\n MIX = 0\n MP = 1\n HMM = 2\n\n\nclass NormalizeForm(IntEnum):\n \"\"\"An enumeration for NormalizeUTF8, effective enumeration types are NONE, NFC, NFKC, NFD, NFKD.\"\"\"\n NONE = 0\n NFC = 1\n NFKC = 2\n NFD = 3\n NFKD = 4\n\n\nclass SentencePieceModel(IntEnum):\n \"\"\"An enumeration for SentencePieceModel, effective enumeration types are UNIGRAM, BPE, CHAR, WORD.\"\"\"\n UNIGRAM = 0\n BPE = 1\n CHAR = 2\n WORD = 3\n\nDE_C_INTER_SENTENCEPIECE_MODE = {\n SentencePieceModel.UNIGRAM: cde.SentencePieceModel.DE_SENTENCE_PIECE_UNIGRAM,\n SentencePieceModel.BPE: 
cde.SentencePieceModel.DE_SENTENCE_PIECE_BPE,\n SentencePieceModel.CHAR: cde.SentencePieceModel.DE_SENTENCE_PIECE_CHAR,\n SentencePieceModel.WORD: cde.SentencePieceModel.DE_SENTENCE_PIECE_WORD\n}\n\n\nclass SPieceTokenizerOutType(IntEnum):\n \"\"\"An enumeration for SPieceTokenizerOutType, effective enumeration types are STRING, INT.\"\"\"\n STRING = 0\n INT = 1\n\n\nclass SPieceTokenizerLoadType(IntEnum):\n \"\"\"An enumeration for SPieceTokenizerLoadType, effective enumeration types are FILE, MODEL.\"\"\"\n FILE = 0\n MODEL = 1\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport random\nimport json\nimport grpc\nimport numpy as np\nimport requests\nimport ms_service_pb2\nimport ms_service_pb2_grpc\nimport mindspore.dataset as de\nfrom mindspore import Tensor, context\nfrom mindspore import log as logger\nfrom tests.st.networks.models.bert.src.bert_model import BertModel\nfrom .generate_model import bert_net_cfg\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\nrandom.seed(1)\nnp.random.seed(1)\nde.config.set_seed(1)\n\ndef test_bert():\n MAX_MESSAGE_LENGTH = 0x7fffffff\n input_ids = np.random.randint(0, 1000, size=(2, 32), dtype=np.int32)\n segment_ids = np.zeros((2, 32), dtype=np.int32)\n input_mask = np.zeros((2, 32), dtype=np.int32)\n\n # grpc visit\n channel = grpc.insecure_channel('localhost:5500', options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),\n ('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)])\n stub = ms_service_pb2_grpc.MSServiceStub(channel)\n request = ms_service_pb2.PredictRequest()\n\n x = request.data.add()\n x.tensor_shape.dims.extend([2, 32])\n x.tensor_type = ms_service_pb2.MS_INT32\n x.data = input_ids.tobytes()\n\n y = request.data.add()\n y.tensor_shape.dims.extend([2, 32])\n y.tensor_type = ms_service_pb2.MS_INT32\n y.data = segment_ids.tobytes()\n\n z = request.data.add()\n z.tensor_shape.dims.extend([2, 32])\n z.tensor_type = ms_service_pb2.MS_INT32\n z.data = input_mask.tobytes()\n\n result = stub.Predict(request)\n grpc_result = np.frombuffer(result.result[0].data, dtype=np.float32).reshape(result.result[0].tensor_shape.dims)\n print(\"ms grpc client received: \")\n print(grpc_result)\n\n # ms result\n net = BertModel(bert_net_cfg, False)\n bert_out = net(Tensor(input_ids), Tensor(segment_ids), Tensor(input_mask))\n print(\"bert out: \")\n print(bert_out[0])\n bert_out_size = len(bert_out)\n\n # compare grpc result\n for i in range(bert_out_size):\n grpc_result = np.frombuffer(result.result[i].data, dtype=np.float32).reshape(result.result[i].tensor_shape.dims)\n logger.info(\"i:{}, grpc_result:{}, bert_out:{}\".\n format(i, result.result[i].tensor_shape.dims, bert_out[i].asnumpy().shape))\n assert np.allclose(bert_out[i].asnumpy(), grpc_result, 0.001, 0.001, equal_nan=True)\n\n # http visit\n data = {\"tensor\": [input_ids.tolist(), segment_ids.tolist(), input_mask.tolist()]}\n 
url = \"http://127.0.0.1:5501\"\n input_json = json.dumps(data)\n headers = {'Content-type': 'application/json'}\n response = requests.post(url, data=input_json, headers=headers)\n result = response.text\n result = result.replace('\\r', '\\\\r').replace('\\n', '\\\\n')\n result_json = json.loads(result, strict=False)\n http_result = np.array(result_json['tensor'])\n print(\"ms http client received: \")\n print(http_result[0][:200])\n\n # compare http result\n for i in range(bert_out_size):\n logger.info(\"i:{}, http_result:{}, bert_out:{}\".\n format(i, np.shape(http_result[i]), bert_out[i].asnumpy().shape))\n assert np.allclose(bert_out[i].asnumpy(), http_result[i], 0.001, 0.001, equal_nan=True)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\r\n\r\nimport math\r\nimport os\r\nimport random\r\n\r\nimport Polygon as plg\r\nimport cv2\r\nimport numpy as np\r\nimport pyclipper\r\nfrom PIL import Image\r\nfrom src.config import config\r\n\r\nimport mindspore.dataset.engine as de\r\nimport mindspore.dataset.vision.py_transforms as py_transforms\r\n\r\n__all__ = ['train_dataset_creator', 'test_dataset_creator']\r\n\r\ndef get_img(img_path):\r\n img = cv2.imread(img_path)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n return img\r\n\r\ndef get_imgs_names(root_dir):\r\n img_paths = [i for i in os.listdir(root_dir)\r\n if os.path.splitext(i)[-1].lower() in ['.jpg', '.jpeg', '.png']]\r\n return img_paths\r\n\r\ndef get_bboxes(img, gt_path):\r\n h, w = img.shape[0:2]\r\n with open(gt_path, 'r', encoding='utf-8-sig') as f:\r\n lines = f.readlines()\r\n bboxes = []\r\n tags = []\r\n for line in lines:\r\n line = line.replace('\\xef\\xbb\\xbf', '')\r\n line = line.replace('\\ufeff', '')\r\n line = line.replace('\\n', '')\r\n gt = line.split(\",\", 8)\r\n tag = gt[-1][0] != '#'\r\n box = [int(gt[i]) for i in range(8)]\r\n box = np.asarray(box) / ([w * 1.0, h * 1.0] * 4)\r\n bboxes.append(box)\r\n tags.append(tag)\r\n return np.array(bboxes), tags\r\n\r\ndef random_scale(img, min_size):\r\n h, w = img.shape[0:2]\r\n if max(h, w) > 1280:\r\n scale1 = 1280.0 / max(h, w)\r\n img = cv2.resize(img, dsize=None, fx=scale1, fy=scale1)\r\n\r\n h, w = img.shape[0:2]\r\n random_scale1 = np.array([0.5, 1.0, 2.0, 3.0])\r\n scale2 = np.random.choice(random_scale1)\r\n if min(h, w) * scale2 <= min_size:\r\n scale3 = (min_size + 10) * 1.0 / min(h, w)\r\n img = cv2.resize(img, dsize=None, fx=scale3, fy=scale3)\r\n else:\r\n img = cv2.resize(img, dsize=None, fx=scale2, fy=scale2)\r\n return img\r\n\r\ndef random_horizontal_flip(imgs):\r\n if random.random() < 0.5:\r\n for i, _ in enumerate(imgs):\r\n imgs[i] = np.flip(imgs[i], axis=1).copy()\r\n return imgs\r\n\r\ndef random_rotate(imgs):\r\n max_angle = 10\r\n angle = random.random() * 2 * max_angle - max_angle\r\n for i, _ in enumerate(imgs):\r\n img = imgs[i]\r\n w, h = img.shape[:2]\r\n rotation_matrix = 
cv2.getRotationMatrix2D((h / 2, w / 2), angle, 1)\r\n img_rotation = cv2.warpAffine(img, rotation_matrix, (h, w))\r\n imgs[i] = img_rotation\r\n return imgs\r\n\r\ndef random_crop(imgs, img_size):\r\n h, w = imgs[0].shape[0:2]\r\n th, tw = img_size\r\n if w == tw and h == th:\r\n return imgs\r\n\r\n if random.random() > 3.0 / 8.0 and np.max(imgs[1]) > 0:\r\n tl = np.min(np.where(imgs[1] > 0), axis=1) - img_size\r\n tl[tl < 0] = 0\r\n br = np.max(np.where(imgs[1] > 0), axis=1) - img_size\r\n br[br < 0] = 0\r\n br[0] = min(br[0], h - th)\r\n br[1] = min(br[1], w - tw)\r\n\r\n i = random.randint(tl[0], br[0])\r\n j = random.randint(tl[1], br[1])\r\n else:\r\n i = random.randint(0, h - th)\r\n j = random.randint(0, w - tw)\r\n\r\n for idx, _ in enumerate(imgs):\r\n if len(imgs[idx].shape) == 3:\r\n imgs[idx] = imgs[idx][i:i + th, j:j + tw, :]\r\n else:\r\n imgs[idx] = imgs[idx][i:i + th, j:j + tw]\r\n return imgs\r\n\r\ndef scale(img, long_size=2240):\r\n h, w = img.shape[0:2]\r\n scale_long = long_size * 1.0 / max(h, w)\r\n img = cv2.resize(img, dsize=None, fx=scale_long, fy=scale_long)\r\n return img\r\n\r\ndef dist(a, b):\r\n return np.sqrt(np.sum((a - b) ** 2))\r\n\r\ndef perimeter(bbox):\r\n peri = 0.0\r\n for i in range(bbox.shape[0]):\r\n peri += dist(bbox[i], bbox[(i + 1) % bbox.shape[0]])\r\n return peri\r\n\r\ndef shrink(bboxes, rate, max_shr=20):\r\n rate = rate * rate\r\n shrinked_bboxes = []\r\n for bbox in bboxes:\r\n area = plg.Polygon(bbox).area()\r\n peri = perimeter(bbox)\r\n\r\n pco = pyclipper.PyclipperOffset()\r\n pco.AddPath(bbox, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)\r\n offset = min((int)(area * (1 - rate) / (peri + 0.001) + 0.5), max_shr)\r\n\r\n shrinked_bbox = pco.Execute(-offset)\r\n if not shrinked_bbox:\r\n shrinked_bboxes.append(bbox)\r\n continue\r\n\r\n shrinked_bbox = np.array(shrinked_bbox)[0]\r\n if shrinked_bbox.shape[0] <= 2:\r\n shrinked_bboxes.append(bbox)\r\n continue\r\n\r\n shrinked_bboxes.append(shrinked_bbox)\r\n\r\n return np.array(shrinked_bboxes)\r\n\r\nclass TrainDataset:\r\n def __init__(self):\r\n self.is_transform = config.TRAIN_IS_TRANSFORM\r\n self.img_size = config.TRAIN_LONG_SIZE\r\n self.kernel_num = config.KERNEL_NUM\r\n self.min_scale = config.TRAIN_MIN_SCALE\r\n\r\n root_dir = os.path.join(os.path.join(os.path.dirname(__file__), '..'), config.TRAIN_ROOT_DIR)\r\n ic15_train_data_dir = root_dir + 'ch4_training_images/'\r\n ic15_train_gt_dir = root_dir + 'ch4_training_localization_transcription_gt/'\r\n\r\n self.img_size = self.img_size if \\\r\n (self.img_size is None or isinstance(self.img_size, tuple)) \\\r\n else (self.img_size, self.img_size)\r\n\r\n data_dirs = [ic15_train_data_dir]\r\n gt_dirs = [ic15_train_gt_dir]\r\n\r\n self.all_img_paths = []\r\n self.all_gt_paths = []\r\n\r\n for data_dir, gt_dir in zip(data_dirs, gt_dirs):\r\n img_names = [i for i in os.listdir(data_dir)\r\n if os.path.splitext(i)[-1].lower()\r\n in ['.jpg', '.jpeg', '.png']]\r\n\r\n img_paths = []\r\n gt_paths = []\r\n for _, img_name in enumerate(img_names):\r\n img_path = os.path.join(data_dir, img_name)\r\n gt_name = 'gt_' + img_name.split('.')[0] + '.txt'\r\n gt_path = os.path.join(gt_dir, gt_name)\r\n img_paths.append(img_path)\r\n gt_paths.append(gt_path)\r\n\r\n self.all_img_paths.extend(img_paths)\r\n self.all_gt_paths.extend(gt_paths)\r\n\r\n def __getitem__(self, index):\r\n img_path = self.all_img_paths[index]\r\n gt_path = self.all_gt_paths[index]\r\n\r\n img = get_img(img_path)\r\n bboxes, tags = get_bboxes(img, gt_path)\r\n\r\n # 
multi-scale training\r\n if self.is_transform:\r\n img = random_scale(img, min_size=self.img_size[0])\r\n\r\n # get gt_text and training_mask\r\n img_h, img_w = img.shape[0: 2]\r\n gt_text = np.zeros((img_h, img_w), dtype=np.float32)\r\n training_mask = np.ones((img_h, img_w), dtype=np.float32)\r\n if bboxes.shape[0] > 0:\r\n bboxes = np.reshape(bboxes * ([img_w, img_h] * 4), (bboxes.shape[0], -1, 2)).astype('int32')\r\n for i in range(bboxes.shape[0]):\r\n cv2.drawContours(gt_text, [bboxes[i]], 0, i + 1, -1)\r\n if not tags[i]:\r\n cv2.drawContours(training_mask, [bboxes[i]], 0, 0, -1)\r\n\r\n # get gt_kernels\r\n gt_kernels = []\r\n for i in range(1, self.kernel_num):\r\n rate = 1.0 - (1.0 - self.min_scale) / (self.kernel_num - 1) * i\r\n gt_kernel = np.zeros(img.shape[0:2], dtype=np.float32)\r\n kernel_bboxes = shrink(bboxes, rate)\r\n for j in range(kernel_bboxes.shape[0]):\r\n cv2.drawContours(gt_kernel, [kernel_bboxes[j]], 0, 1, -1)\r\n gt_kernels.append(gt_kernel)\r\n\r\n # data augmentation\r\n if self.is_transform:\r\n imgs = [img, gt_text, training_mask]\r\n imgs.extend(gt_kernels)\r\n imgs = random_horizontal_flip(imgs)\r\n imgs = random_rotate(imgs)\r\n imgs = random_crop(imgs, self.img_size)\r\n img, gt_text, training_mask, gt_kernels = imgs[0], imgs[1], imgs[2], imgs[3:]\r\n\r\n gt_text[gt_text > 0] = 1\r\n gt_kernels = np.array(gt_kernels)\r\n\r\n if self.is_transform:\r\n img = Image.fromarray(img)\r\n img = img.convert('RGB')\r\n img = py_transforms.RandomColorAdjust(brightness=32.0 / 255, saturation=0.5)(img)\r\n else:\r\n img = Image.fromarray(img)\r\n img = img.convert('RGB')\r\n\r\n img = py_transforms.ToTensor()(img)\r\n img = py_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)\r\n\r\n gt_text = gt_text.astype(np.float32)\r\n gt_kernels = gt_kernels.astype(np.float32)\r\n training_mask = training_mask.astype(np.float32)\r\n\r\n return img, gt_text, gt_kernels, training_mask\r\n\r\n def __len__(self):\r\n return len(self.all_img_paths)\r\n\r\ndef IC15_TEST_Generator():\r\n ic15_test_data_dir = config.TEST_ROOT_DIR + 'ch4_test_images/'\r\n img_size = config.INFER_LONG_SIZE\r\n\r\n img_size = img_size if (img_size is None or isinstance(img_size, tuple)) else (img_size, img_size)\r\n\r\n data_dirs = [ic15_test_data_dir]\r\n all_img_paths = []\r\n\r\n for data_dir in data_dirs:\r\n img_names = [i for i in os.listdir(data_dir) if os.path.splitext(i)[-1].lower() in ['.jpg', '.jpeg', '.png']]\r\n\r\n img_paths = []\r\n for _, img_name in enumerate(img_names):\r\n img_path = data_dir + img_name\r\n img_paths.append(img_path)\r\n\r\n all_img_paths.extend(img_paths)\r\n\r\n dataset_length = len(all_img_paths)\r\n\r\n for index in range(dataset_length):\r\n img_path = all_img_paths[index]\r\n img_name = np.array(os.path.split(img_path)[-1])\r\n img = get_img(img_path)\r\n\r\n long_size = max(img.shape[:2])\r\n img_resized = np.zeros((long_size, long_size, 3), np.uint8)\r\n img_resized[:img.shape[0], :img.shape[1], :] = img\r\n img_resized = cv2.resize(img_resized, dsize=img_size)\r\n\r\n img_resized = Image.fromarray(img_resized)\r\n img_resized = img_resized.convert('RGB')\r\n img_resized = py_transforms.ToTensor()(img_resized)\r\n img_resized = py_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img_resized)\r\n\r\n yield img, img_resized, img_name\r\n\r\nclass DistributedSampler():\r\n def __init__(self, dataset, rank, group_size, shuffle=True, seed=0):\r\n self.dataset = dataset\r\n self.rank = rank\r\n 
self.group_size = group_size\r\n self.dataset_len = len(self.dataset)\r\n self.num_samplers = int(math.ceil(self.dataset_len * 1.0 / self.group_size))\r\n self.total_size = self.num_samplers * self.group_size\r\n self.shuffle = shuffle\r\n self.seed = seed\r\n\r\n def __iter__(self):\r\n if self.shuffle:\r\n self.seed = (self.seed + 1) & 0xffffffff\r\n np.random.seed(self.seed)\r\n indices = np.random.permutation(self.dataset_len).tolist()\r\n else:\r\n indices = list(range(len(self.dataset_len)))\r\n\r\n indices += indices[:(self.total_size - len(indices))]\r\n indices = indices[self.rank::self.group_size]\r\n return iter(indices)\r\n\r\n def __len__(self):\r\n return self.num_samplers\r\n\r\ndef train_dataset_creator(rank, group_size, shuffle=True):\r\n cv2.setNumThreads(0)\r\n dataset = TrainDataset()\r\n sampler = DistributedSampler(dataset, rank, group_size, shuffle)\r\n ds = de.GeneratorDataset(dataset, ['img', 'gt_text', 'gt_kernels', 'training_mask'], num_parallel_workers=8,\r\n sampler=sampler)\r\n ds = ds.repeat(1)\r\n ds = ds.batch(config.TRAIN_BATCH_SIZE, drop_remainder=config.TRAIN_DROP_REMAINDER)\r\n return ds\r\n\r\ndef test_dataset_creator():\r\n ds = de.GeneratorDataset(IC15_TEST_Generator, ['img', 'img_resized', 'img_name'])\r\n ds = ds.shuffle(config.TEST_BUFFER_SIZE)\r\n ds = ds.batch(1, drop_remainder=config.TEST_DROP_REMAINDER)\r\n return ds\r\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Dataset loading, creation and processing\"\"\"\nimport logging\nimport math\nimport os\nimport time\nimport timeit\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nfrom mindspore.dataset.engine import GeneratorDataset\n\nimport src.constants as rconst\nimport src.movielens as movielens\nimport src.stat_utils as stat_utils\n\n\nDATASET_TO_NUM_USERS_AND_ITEMS = {\n \"ml-1m\": (6040, 3706),\n \"ml-20m\": (138493, 26744)\n}\n\n_EXPECTED_CACHE_KEYS = (\n rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY, rconst.EVAL_USER_KEY,\n rconst.EVAL_ITEM_KEY, rconst.USER_MAP, rconst.ITEM_MAP)\n\n\ndef load_data(data_dir, dataset):\n \"\"\"\n Load data in .csv format and output structured data.\n\n This function reads in the raw CSV of positive items, and performs three\n preprocessing transformations:\n\n 1) Filter out all users who have not rated at least a certain number\n of items. 
(Typically 20 items)\n\n 2) Zero index the users and items such that the largest user_id is\n `num_users - 1` and the largest item_id is `num_items - 1`\n\n 3) Sort the dataframe by user_id, with timestamp as a secondary sort key.\n This allows the dataframe to be sliced by user in-place, and for the last\n item to be selected simply by calling the `-1` index of a user's slice.\n\n While all of these transformations are performed by Pandas (and are therefore\n single-threaded), they only take ~2 minutes, and the overhead to apply a\n MapReduce pattern to parallel process the dataset adds significant complexity\n for no computational gain. For a larger dataset parallelizing this\n preprocessing could yield speedups. (Also, this preprocessing step is only\n performed once for an entire run.\n \"\"\"\n logging.info(\"Beginning loading data...\")\n\n raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)\n cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)\n\n valid_cache = os.path.exists(cache_path)\n if valid_cache:\n with open(cache_path, 'rb') as f:\n cached_data = pickle.load(f)\n\n for key in _EXPECTED_CACHE_KEYS:\n if key not in cached_data:\n valid_cache = False\n\n if not valid_cache:\n logging.info(\"Removing stale raw data cache file.\")\n os.remove(cache_path)\n\n if valid_cache:\n data = cached_data\n else:\n # process data and save to .csv\n with open(raw_rating_path) as f:\n df = pd.read_csv(f)\n\n # Get the info of users who have more than 20 ratings on items\n grouped = df.groupby(movielens.USER_COLUMN)\n df = grouped.filter(lambda x: len(x) >= rconst.MIN_NUM_RATINGS)\n\n original_users = df[movielens.USER_COLUMN].unique()\n original_items = df[movielens.ITEM_COLUMN].unique()\n\n # Map the ids of user and item to 0 based index for following processing\n logging.info(\"Generating user_map and item_map...\")\n user_map = {user: index for index, user in enumerate(original_users)}\n item_map = {item: index for index, item in enumerate(original_items)}\n\n df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(\n lambda user: user_map[user])\n df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(\n lambda item: item_map[item])\n\n num_users = len(original_users)\n num_items = len(original_items)\n\n assert num_users <= np.iinfo(rconst.USER_DTYPE).max\n assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max\n assert df[movielens.USER_COLUMN].max() == num_users - 1\n assert df[movielens.ITEM_COLUMN].max() == num_items - 1\n\n # This sort is used to shard the dataframe by user, and later to select\n # the last item for a user to be used in validation.\n logging.info(\"Sorting by user, timestamp...\")\n\n # This sort is equivalent to\n # df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],\n # inplace=True)\n # except that the order of items with the same user and timestamp are\n # sometimes different. 
For some reason, this sort results in a better\n # hit-rate during evaluation, matching the performance of the MLPerf\n # reference implementation.\n df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)\n df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],\n inplace=True, kind=\"mergesort\")\n\n # The dataframe does not reconstruct indices in the sort or filter steps.\n df = df.reset_index()\n\n grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)\n eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])\n\n data = {\n rconst.TRAIN_USER_KEY:\n train_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),\n rconst.TRAIN_ITEM_KEY:\n train_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),\n rconst.EVAL_USER_KEY:\n eval_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),\n rconst.EVAL_ITEM_KEY:\n eval_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),\n rconst.USER_MAP: user_map,\n rconst.ITEM_MAP: item_map,\n \"create_time\": time.time(),\n }\n\n logging.info(\"Writing raw data cache.\")\n with open(cache_path, \"wb\") as f:\n pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n num_users, num_items = DATASET_TO_NUM_USERS_AND_ITEMS[dataset]\n if num_users != len(data[rconst.USER_MAP]):\n raise ValueError(\"Expected to find {} users, but found {}\".format(\n num_users, len(data[rconst.USER_MAP])))\n if num_items != len(data[rconst.ITEM_MAP]):\n raise ValueError(\"Expected to find {} items, but found {}\".format(\n num_items, len(data[rconst.ITEM_MAP])))\n\n return data, num_users, num_items\n\n\ndef construct_lookup_variables(train_pos_users, train_pos_items, num_users):\n \"\"\"Lookup variables\"\"\"\n index_bounds = None\n sorted_train_pos_items = None\n\n def index_segment(user):\n lower, upper = index_bounds[user:user + 2]\n items = sorted_train_pos_items[lower:upper]\n\n negatives_since_last_positive = np.concatenate(\n [items[0][np.newaxis], items[1:] - items[:-1] - 1])\n\n return np.cumsum(negatives_since_last_positive)\n\n start_time = timeit.default_timer()\n inner_bounds = np.argwhere(train_pos_users[1:] -\n train_pos_users[:-1])[:, 0] + 1\n (upper_bound,) = train_pos_users.shape\n index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])\n\n # Later logic will assume that the users are in sequential ascending order.\n assert np.array_equal(train_pos_users[index_bounds[:-1]], np.arange(num_users))\n\n sorted_train_pos_items = train_pos_items.copy()\n\n for i in range(num_users):\n lower, upper = index_bounds[i:i + 2]\n sorted_train_pos_items[lower:upper].sort()\n\n total_negatives = np.concatenate([\n index_segment(i) for i in range(num_users)])\n\n logging.info(\"Negative total vector built. 
Time: {:.1f} seconds\".format(\n timeit.default_timer() - start_time))\n\n return total_negatives, index_bounds, sorted_train_pos_items\n\n\nclass NCFDataset:\n \"\"\"\n A dataset for NCF network.\n \"\"\"\n def __init__(self,\n pos_users,\n pos_items,\n num_users,\n num_items,\n batch_size,\n total_negatives,\n index_bounds,\n sorted_train_pos_items,\n is_training=True):\n self._pos_users = pos_users\n self._pos_items = pos_items\n self._num_users = num_users\n self._num_items = num_items\n\n self._batch_size = batch_size\n\n self._total_negatives = total_negatives\n self._index_bounds = index_bounds\n self._sorted_train_pos_items = sorted_train_pos_items\n\n self._is_training = is_training\n\n if self._is_training:\n self._train_pos_count = self._pos_users.shape[0]\n else:\n self._eval_users_per_batch = int(\n batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))\n\n def lookup_negative_items(self, negative_users):\n \"\"\"Lookup negative items\"\"\"\n output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1\n\n left_index = self._index_bounds[negative_users]\n right_index = self._index_bounds[negative_users + 1] - 1\n\n num_positives = right_index - left_index + 1\n num_negatives = self._num_items - num_positives\n neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives)\n\n # Shortcuts:\n # For points where the negative is greater than or equal to the tally before\n # the last positive point there is no need to bisect. Instead the item id\n # corresponding to the negative item choice is simply:\n # last_postive_index + 1 + (neg_choice - last_negative_tally)\n # Similarly, if the selection is less than the tally at the first positive\n # then the item_id is simply the selection.\n #\n # Because MovieLens organizes popular movies into low integers (which is\n # preserved through the preprocessing), the first shortcut is very\n # efficient, allowing ~60% of samples to bypass the bisection. 
For the same\n # reason, the second shortcut is rarely triggered (<0.02%) and is therefore\n # not worth implementing.\n use_shortcut = neg_item_choice >= self._total_negatives[right_index]\n output[use_shortcut] = (\n self._sorted_train_pos_items[right_index] + 1 +\n (neg_item_choice - self._total_negatives[right_index])\n )[use_shortcut]\n\n if np.all(use_shortcut):\n # The bisection code is ill-posed when there are no elements.\n return output\n\n not_use_shortcut = np.logical_not(use_shortcut)\n left_index = left_index[not_use_shortcut]\n right_index = right_index[not_use_shortcut]\n neg_item_choice = neg_item_choice[not_use_shortcut]\n\n num_loops = np.max(\n np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32))\n\n for _ in range(num_loops):\n mid_index = (left_index + right_index) // 2\n right_criteria = self._total_negatives[mid_index] > neg_item_choice\n left_criteria = np.logical_not(right_criteria)\n\n right_index[right_criteria] = mid_index[right_criteria]\n left_index[left_criteria] = mid_index[left_criteria]\n\n # Expected state after bisection pass:\n # The right index is the smallest index whose tally is greater than the\n # negative item choice index.\n\n assert np.all((right_index - left_index) <= 1)\n\n output[not_use_shortcut] = (\n self._sorted_train_pos_items[right_index] - (self._total_negatives[right_index] - neg_item_choice)\n )\n\n assert np.all(output >= 0)\n\n return output\n\n def _get_train_item(self, index):\n \"\"\"Get train item\"\"\"\n (mask_start_index,) = index.shape\n index_mod = np.mod(index, self._train_pos_count)\n\n # get batch of users\n users = self._pos_users[index_mod]\n\n # get batch of items\n negative_indices = np.greater_equal(index, self._train_pos_count)\n negative_users = users[negative_indices]\n negative_items = self.lookup_negative_items(negative_users=negative_users)\n items = self._pos_items[index_mod]\n items[negative_indices] = negative_items\n\n # get batch of labels\n labels = np.logical_not(negative_indices)\n\n # pad last partial batch\n pad_length = self._batch_size - index.shape[0]\n if pad_length:\n user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users\n item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items\n label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype)\n users = np.concatenate([users, user_pad])\n items = np.concatenate([items, item_pad])\n labels = np.concatenate([labels, label_pad])\n\n users = np.reshape(users, (self._batch_size, 1)) # (_batch_size, 1), int32\n items = np.reshape(items, (self._batch_size, 1)) # (_batch_size, 1), int32\n mask_start_index = np.array(mask_start_index, dtype=np.int32) # (_batch_size, 1), int32\n valid_pt_mask = np.expand_dims(\n np.less(np.arange(self._batch_size), mask_start_index), -1).astype(np.float32) # (_batch_size, 1), bool\n labels = np.reshape(labels, (self._batch_size, 1)).astype(np.int32) # (_batch_size, 1), bool\n\n return users, items, labels, valid_pt_mask\n\n @staticmethod\n def _assemble_eval_batch(users, positive_items, negative_items,\n users_per_batch):\n \"\"\"Construct duplicate_mask and structure data accordingly.\n\n The positive items should be last so that they lose ties. However, they\n should not be masked out if the true eval positive happens to be\n selected as a negative. So instead, the positive is placed in the first\n position, and then switched with the last element after the duplicate\n mask has been computed.\n\n Args:\n users: An array of users in a batch. 
(should be identical along axis 1)\n positive_items: An array (batch_size x 1) of positive item indices.\n negative_items: An array of negative item indices.\n users_per_batch: How many users should be in the batch. This is passed\n as an argument so that ncf_test.py can use this method.\n\n Returns:\n User, item, and duplicate_mask arrays.\n \"\"\"\n items = np.concatenate([positive_items, negative_items], axis=1)\n\n # We pad the users and items here so that the duplicate mask calculation\n # will include padding. The metric function relies on all padded elements\n # except the positive being marked as duplicate to mask out padded points.\n if users.shape[0] < users_per_batch:\n pad_rows = users_per_batch - users.shape[0]\n padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32)\n users = np.concatenate([users, padding.astype(users.dtype)], axis=0)\n items = np.concatenate([items, padding.astype(items.dtype)], axis=0)\n\n duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.float32)\n\n items[:, (0, -1)] = items[:, (-1, 0)]\n duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)]\n\n assert users.shape == items.shape == duplicate_mask.shape\n return users, items, duplicate_mask\n\n def _get_eval_item(self, index):\n \"\"\"Get eval item\"\"\"\n low_index, high_index = index\n users = np.repeat(self._pos_users[low_index:high_index, np.newaxis],\n 1 + rconst.NUM_EVAL_NEGATIVES, axis=1)\n positive_items = self._pos_items[low_index:high_index, np.newaxis]\n negative_items = (self.lookup_negative_items(negative_users=users[:, :-1])\n .reshape(-1, rconst.NUM_EVAL_NEGATIVES))\n\n users, items, duplicate_mask = self._assemble_eval_batch(\n users, positive_items, negative_items, self._eval_users_per_batch)\n\n users = np.reshape(users.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), int32\n items = np.reshape(items.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), int32\n duplicate_mask = np.reshape(duplicate_mask.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), bool\n\n return users, items, duplicate_mask\n\n def __getitem__(self, index):\n \"\"\"\n Get a batch of samples.\n \"\"\"\n if self._is_training:\n return self._get_train_item(index)\n\n return self._get_eval_item(index)\n\n\nclass RandomSampler:\n \"\"\"\n A random sampler for dataset.\n \"\"\"\n def __init__(self, pos_count, num_train_negatives, batch_size):\n self.pos_count = pos_count\n self._num_samples = (1 + num_train_negatives) * self.pos_count\n self._batch_size = batch_size\n self._num_batches = math.ceil(self._num_samples / self._batch_size)\n\n def __iter__(self):\n \"\"\"\n Return indices of all batches within an epoch.\n \"\"\"\n indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))\n\n batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._num_batches)]\n return iter(batch_indices)\n\n def __len__(self):\n \"\"\"\n Return length of the sampler, i.e., the number of batches for an epoch.\n \"\"\"\n return self._num_batches\n\n\nclass DistributedSamplerOfTrain:\n \"\"\"\n A distributed sampler for dataset.\n \"\"\"\n def __init__(self, pos_count, num_train_negatives, batch_size, rank_id, rank_size):\n \"\"\"\n Distributed sampler of training dataset.\n \"\"\"\n self._num_samples = (1 + num_train_negatives) * pos_count\n self._rank_id = rank_id\n self._rank_size = rank_size\n self._batch_size = batch_size\n\n self._batchs_per_rank = int(math.ceil(self._num_samples / self._batch_size / rank_size))\n 
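Both distributed samplers in these loaders follow the same recipe: shuffle once, pad the index list so it divides evenly by the number of ranks, then hand each rank a strided slice so every device sees the same number of batches. A minimal, self-contained sketch of that recipe follows; the `shard_indices` helper is illustrative and not part of either module.

# Minimal sketch (illustrative helper, not from the module).
import math
import numpy as np

def shard_indices(num_samples, rank_id, rank_size, seed=0):
    batches_per_rank = math.ceil(num_samples / rank_size)
    total = batches_per_rank * rank_size
    np.random.seed(seed)
    indices = np.random.permutation(num_samples).tolist()
    indices += indices[:total - len(indices)]   # wrap around so the list divides evenly
    return indices[rank_id:total:rank_size]     # strided slice: rank_id, rank_id + rank_size, ...

# Every rank gets the same count, so batch counts stay aligned across devices.
assert len(shard_indices(10, 0, 4)) == len(shard_indices(10, 3, 4)) == 3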
self._samples_per_rank = int(math.ceil(self._batchs_per_rank * self._batch_size))\n self._total_num_samples = self._samples_per_rank * self._rank_size\n def __iter__(self):\n \"\"\"\n Returns the data after each sampling.\n \"\"\"\n indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))\n indices = indices.tolist()\n indices.extend(indices[:self._total_num_samples-len(indices)])\n indices = indices[self._rank_id:self._total_num_samples:self._rank_size]\n batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._batchs_per_rank)]\n\n return iter(np.array(batch_indices))\n\n def __len__(self):\n \"\"\"\n Returns the length after each sampling.\n \"\"\"\n return self._batchs_per_rank\n\nclass SequenceSampler:\n \"\"\"\n A sequence sampler for dataset.\n \"\"\"\n def __init__(self, eval_batch_size, num_users):\n self._eval_users_per_batch = int(\n eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))\n self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)\n self._eval_batches_per_epoch = self.count_batches(\n self._eval_elements_in_epoch, eval_batch_size)\n\n def __iter__(self):\n indices = [(x * self._eval_users_per_batch, (x + 1) * self._eval_users_per_batch)\n for x in range(self._eval_batches_per_epoch)]\n return iter(indices)\n\n @staticmethod\n def count_batches(example_count, batch_size, batches_per_step=1):\n \"\"\"Determine the number of batches, rounding up to fill all devices.\"\"\"\n x = (example_count + batch_size - 1) // batch_size\n return (x + batches_per_step - 1) // batches_per_step * batches_per_step\n\n def __len__(self):\n \"\"\"\n Return the length of the sampler, i,e, the number of batches in an epoch.\n \"\"\"\n return self._eval_batches_per_epoch\n\nclass DistributedSamplerOfEval:\n \"\"\"\n A distributed sampler for eval dataset.\n \"\"\"\n def __init__(self, eval_batch_size, num_users, rank_id, rank_size):\n self._eval_users_per_batch = int(\n eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))\n self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)\n self._eval_batches_per_epoch = self.count_batches(\n self._eval_elements_in_epoch, eval_batch_size)\n\n self._rank_id = rank_id\n self._rank_size = rank_size\n self._eval_batch_size = eval_batch_size\n\n self._batchs_per_rank = int(math.ceil(self._eval_batches_per_epoch / rank_size))\n #self._samples_per_rank = int(math.ceil(self._batchs_per_rank * self._eval_batch_size))\n #self._total_num_samples = self._samples_per_rank * self._rank_size\n\n def __iter__(self):\n indices = [(x * self._eval_users_per_batch, (x + self._rank_id + 1) * self._eval_users_per_batch)\n for x in range(self._batchs_per_rank)]\n\n return iter(np.array(indices))\n\n @staticmethod\n def count_batches(example_count, batch_size, batches_per_step=1):\n \"\"\"Determine the number of batches, rounding up to fill all devices.\"\"\"\n x = (example_count + batch_size - 1) // batch_size\n return (x + batches_per_step - 1) // batches_per_step * batches_per_step\n\n def __len__(self):\n return self._batchs_per_rank\n\ndef parse_eval_batch_size(eval_batch_size):\n \"\"\"\n Parse eval batch size.\n \"\"\"\n if eval_batch_size % (1 + rconst.NUM_EVAL_NEGATIVES):\n raise ValueError(\"Eval batch size {} is not divisible by {}\".format(\n eval_batch_size, 1 + rconst.NUM_EVAL_NEGATIVES))\n return eval_batch_size\n\n\ndef create_dataset(test_train=True, data_dir='./dataset/', dataset='ml-1m', train_epochs=14, batch_size=256,\n eval_batch_size=160000, num_neg=4, 
rank_id=None, rank_size=None):\n \"\"\"\n Create NCF dataset.\n \"\"\"\n data, num_users, num_items = load_data(data_dir, dataset)\n\n train_pos_users = data[rconst.TRAIN_USER_KEY]\n train_pos_items = data[rconst.TRAIN_ITEM_KEY]\n eval_pos_users = data[rconst.EVAL_USER_KEY]\n eval_pos_items = data[rconst.EVAL_ITEM_KEY]\n\n total_negatives, index_bounds, sorted_train_pos_items = \\\n construct_lookup_variables(train_pos_users, train_pos_items, num_users)\n\n if test_train:\n print(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives, index_bounds,\n sorted_train_pos_items)\n dataset = NCFDataset(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives,\n index_bounds, sorted_train_pos_items)\n sampler = RandomSampler(train_pos_users.shape[0], num_neg, batch_size)\n if rank_id is not None and rank_size is not None:\n sampler = DistributedSamplerOfTrain(train_pos_users.shape[0], num_neg, batch_size, rank_id, rank_size)\n if dataset == 'ml-20m':\n ds = GeneratorDataset(dataset,\n column_names=[movielens.USER_COLUMN,\n movielens.ITEM_COLUMN,\n \"labels\",\n rconst.VALID_POINT_MASK],\n sampler=sampler, num_parallel_workers=32, python_multiprocessing=False)\n else:\n ds = GeneratorDataset(dataset,\n column_names=[movielens.USER_COLUMN,\n movielens.ITEM_COLUMN,\n \"labels\",\n rconst.VALID_POINT_MASK],\n sampler=sampler)\n\n else:\n eval_batch_size = parse_eval_batch_size(eval_batch_size=eval_batch_size)\n dataset = NCFDataset(eval_pos_users, eval_pos_items, num_users, num_items,\n eval_batch_size, total_negatives, index_bounds,\n sorted_train_pos_items, is_training=False)\n sampler = SequenceSampler(eval_batch_size, num_users)\n\n ds = GeneratorDataset(dataset,\n column_names=[movielens.USER_COLUMN,\n movielens.ITEM_COLUMN,\n rconst.DUPLICATE_MASK],\n sampler=sampler)\n\n repeat_count = train_epochs if test_train else train_epochs + 1\n ds = ds.repeat(repeat_count)\n\n return ds, num_users, num_items\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"RPN for fasterRCNN\"\"\"\nimport numpy as np\nimport mindspore.nn as nn\nimport mindspore.common.dtype as mstype\nfrom mindspore.ops import operations as P\nfrom mindspore import Tensor\nfrom mindspore.ops import functional as F\nfrom mindspore.common.initializer import initializer\nfrom .bbox_assign_sample import BboxAssignSample\n\n\nclass RpnRegClsBlock(nn.Cell):\n \"\"\"\n Rpn reg cls block for rpn layer\n\n Args:\n in_channels (int) - Input channels of shared convolution.\n feat_channels (int) - Output channels of shared convolution.\n num_anchors (int) - The anchor number.\n cls_out_channels (int) - Output channels of classification convolution.\n weight_conv (Tensor) - weight init for rpn conv.\n bias_conv (Tensor) - bias init for rpn conv.\n weight_cls (Tensor) - weight init for rpn cls conv.\n bias_cls (Tensor) - bias init for rpn cls conv.\n 
weight_reg (Tensor) - weight init for rpn reg conv.\n bias_reg (Tensor) - bias init for rpn reg conv.\n\n Returns:\n Tensor, output tensor.\n \"\"\"\n def __init__(self,\n in_channels,\n feat_channels,\n num_anchors,\n cls_out_channels,\n weight_conv,\n bias_conv,\n weight_cls,\n bias_cls,\n weight_reg,\n bias_reg):\n super(RpnRegClsBlock, self).__init__()\n self.rpn_conv = nn.Conv2d(in_channels, feat_channels, kernel_size=3, stride=1, pad_mode='same',\n has_bias=True, weight_init=weight_conv, bias_init=bias_conv)\n self.relu = nn.ReLU()\n\n self.rpn_cls = nn.Conv2d(feat_channels, num_anchors * cls_out_channels, kernel_size=1, pad_mode='valid',\n has_bias=True, weight_init=weight_cls, bias_init=bias_cls)\n self.rpn_reg = nn.Conv2d(feat_channels, num_anchors * 4, kernel_size=1, pad_mode='valid',\n has_bias=True, weight_init=weight_reg, bias_init=bias_reg)\n\n def construct(self, x):\n x = self.relu(self.rpn_conv(x))\n\n x1 = self.rpn_cls(x)\n x2 = self.rpn_reg(x)\n\n return x1, x2\n\n\nclass RPN(nn.Cell):\n \"\"\"\n ROI proposal network..\n\n Args:\n config (dict) - Config.\n batch_size (int) - Batchsize.\n in_channels (int) - Input channels of shared convolution.\n feat_channels (int) - Output channels of shared convolution.\n num_anchors (int) - The anchor number.\n cls_out_channels (int) - Output channels of classification convolution.\n\n Returns:\n Tuple, tuple of output tensor.\n\n Examples:\n RPN(config=config, batch_size=2, in_channels=256, feat_channels=1024,\n num_anchors=3, cls_out_channels=512)\n \"\"\"\n def __init__(self,\n config,\n batch_size,\n in_channels,\n feat_channels,\n num_anchors,\n cls_out_channels):\n super(RPN, self).__init__()\n cfg_rpn = config\n self.num_bboxes = cfg_rpn.num_bboxes\n self.slice_index = ()\n self.feature_anchor_shape = ()\n self.slice_index += (0,)\n index = 0\n for shape in cfg_rpn.feature_shapes:\n self.slice_index += (self.slice_index[index] + shape[0] * shape[1] * num_anchors,)\n self.feature_anchor_shape += (shape[0] * shape[1] * num_anchors * batch_size,)\n index += 1\n\n self.num_anchors = num_anchors\n self.batch_size = batch_size\n self.test_batch_size = cfg_rpn.test_batch_size\n self.num_layers = 5\n self.real_ratio = Tensor(np.ones((1, 1)).astype(np.float16))\n\n self.rpn_convs_list = nn.layer.CellList(self._make_rpn_layer(self.num_layers, in_channels, feat_channels,\n num_anchors, cls_out_channels))\n\n self.transpose = P.Transpose()\n self.reshape = P.Reshape()\n self.concat = P.Concat(axis=0)\n self.fill = P.Fill()\n self.placeh1 = Tensor(np.ones((1,)).astype(np.float16))\n\n self.trans_shape = (0, 2, 3, 1)\n\n self.reshape_shape_reg = (-1, 4)\n self.reshape_shape_cls = (-1,)\n self.rpn_loss_reg_weight = Tensor(np.array(cfg_rpn.rpn_loss_reg_weight).astype(np.float16))\n self.rpn_loss_cls_weight = Tensor(np.array(cfg_rpn.rpn_loss_cls_weight).astype(np.float16))\n self.num_expected_total = Tensor(np.array(cfg_rpn.num_expected_neg * self.batch_size).astype(np.float16))\n self.num_bboxes = cfg_rpn.num_bboxes\n self.get_targets = BboxAssignSample(cfg_rpn, self.batch_size, self.num_bboxes, False)\n self.CheckValid = P.CheckValid()\n self.sum_loss = P.ReduceSum()\n self.loss_cls = P.SigmoidCrossEntropyWithLogits()\n self.loss_bbox = P.SmoothL1Loss(beta=1.0/9.0)\n self.squeeze = P.Squeeze()\n self.cast = P.Cast()\n self.tile = P.Tile()\n self.zeros_like = P.ZerosLike()\n self.loss = Tensor(np.zeros((1,)).astype(np.float16))\n self.clsloss = Tensor(np.zeros((1,)).astype(np.float16))\n self.regloss = 
Tensor(np.zeros((1,)).astype(np.float16))\n\n def _make_rpn_layer(self, num_layers, in_channels, feat_channels, num_anchors, cls_out_channels):\n \"\"\"\n make rpn layer for rpn proposal network\n\n Args:\n num_layers (int) - layer num.\n in_channels (int) - Input channels of shared convolution.\n feat_channels (int) - Output channels of shared convolution.\n num_anchors (int) - The anchor number.\n cls_out_channels (int) - Output channels of classification convolution.\n\n Returns:\n List, list of RpnRegClsBlock cells.\n \"\"\"\n rpn_layer = []\n\n shp_weight_conv = (feat_channels, in_channels, 3, 3)\n shp_bias_conv = (feat_channels,)\n weight_conv = initializer('Normal', shape=shp_weight_conv, dtype=mstype.float16).to_tensor()\n bias_conv = initializer(0, shape=shp_bias_conv, dtype=mstype.float16).to_tensor()\n\n shp_weight_cls = (num_anchors * cls_out_channels, feat_channels, 1, 1)\n shp_bias_cls = (num_anchors * cls_out_channels,)\n weight_cls = initializer('Normal', shape=shp_weight_cls, dtype=mstype.float16).to_tensor()\n bias_cls = initializer(0, shape=shp_bias_cls, dtype=mstype.float16).to_tensor()\n\n shp_weight_reg = (num_anchors * 4, feat_channels, 1, 1)\n shp_bias_reg = (num_anchors * 4,)\n weight_reg = initializer('Normal', shape=shp_weight_reg, dtype=mstype.float16).to_tensor()\n bias_reg = initializer(0, shape=shp_bias_reg, dtype=mstype.float16).to_tensor()\n\n for i in range(num_layers):\n rpn_layer.append(RpnRegClsBlock(in_channels, feat_channels, num_anchors, cls_out_channels, \\\n weight_conv, bias_conv, weight_cls, \\\n bias_cls, weight_reg, bias_reg))\n\n for i in range(1, num_layers):\n rpn_layer[i].rpn_conv.weight = rpn_layer[0].rpn_conv.weight\n rpn_layer[i].rpn_cls.weight = rpn_layer[0].rpn_cls.weight\n rpn_layer[i].rpn_reg.weight = rpn_layer[0].rpn_reg.weight\n\n rpn_layer[i].rpn_conv.bias = rpn_layer[0].rpn_conv.bias\n rpn_layer[i].rpn_cls.bias = rpn_layer[0].rpn_cls.bias\n rpn_layer[i].rpn_reg.bias = rpn_layer[0].rpn_reg.bias\n\n return rpn_layer\n\n def construct(self, inputs, img_metas, anchor_list, gt_bboxes, gt_labels, gt_valids):\n loss_print = ()\n rpn_cls_score = ()\n rpn_bbox_pred = ()\n rpn_cls_score_total = ()\n rpn_bbox_pred_total = ()\n\n for i in range(self.num_layers):\n x1, x2 = self.rpn_convs_list[i](inputs[i])\n\n rpn_cls_score_total = rpn_cls_score_total + (x1,)\n rpn_bbox_pred_total = rpn_bbox_pred_total + (x2,)\n\n x1 = self.transpose(x1, self.trans_shape)\n x1 = self.reshape(x1, self.reshape_shape_cls)\n\n x2 = self.transpose(x2, self.trans_shape)\n x2 = self.reshape(x2, self.reshape_shape_reg)\n\n rpn_cls_score = rpn_cls_score + (x1,)\n rpn_bbox_pred = rpn_bbox_pred + (x2,)\n\n loss = self.loss\n clsloss = self.clsloss\n regloss = self.regloss\n bbox_targets = ()\n bbox_weights = ()\n labels = ()\n label_weights = ()\n\n output = ()\n if self.training:\n for i in range(self.batch_size):\n multi_level_flags = ()\n anchor_list_tuple = ()\n\n for j in range(self.num_layers):\n res = self.cast(self.CheckValid(anchor_list[j], self.squeeze(img_metas[i:i + 1:1, ::])),\n mstype.int32)\n multi_level_flags = multi_level_flags + (res,)\n anchor_list_tuple = anchor_list_tuple + (anchor_list[j],)\n\n valid_flag_list = self.concat(multi_level_flags)\n anchor_using_list = self.concat(anchor_list_tuple)\n\n gt_bboxes_i = self.squeeze(gt_bboxes[i:i + 1:1, ::])\n gt_labels_i = self.squeeze(gt_labels[i:i + 1:1, ::])\n gt_valids_i = self.squeeze(gt_valids[i:i + 1:1, ::])\n\n bbox_target, bbox_weight, label, label_weight = self.get_targets(gt_bboxes_i,\n 
gt_labels_i,\n self.cast(valid_flag_list,\n mstype.bool_),\n anchor_using_list, gt_valids_i)\n\n bbox_weight = self.cast(bbox_weight, mstype.float16)\n label = self.cast(label, mstype.float16)\n label_weight = self.cast(label_weight, mstype.float16)\n\n for j in range(self.num_layers):\n begin = self.slice_index[j]\n end = self.slice_index[j + 1]\n stride = 1\n bbox_targets += (bbox_target[begin:end:stride, ::],)\n bbox_weights += (bbox_weight[begin:end:stride],)\n labels += (label[begin:end:stride],)\n label_weights += (label_weight[begin:end:stride],)\n\n for i in range(self.num_layers):\n bbox_target_using = ()\n bbox_weight_using = ()\n label_using = ()\n label_weight_using = ()\n\n for j in range(self.batch_size):\n bbox_target_using += (bbox_targets[i + (self.num_layers * j)],)\n bbox_weight_using += (bbox_weights[i + (self.num_layers * j)],)\n label_using += (labels[i + (self.num_layers * j)],)\n label_weight_using += (label_weights[i + (self.num_layers * j)],)\n\n bbox_target_with_batchsize = self.concat(bbox_target_using)\n bbox_weight_with_batchsize = self.concat(bbox_weight_using)\n label_with_batchsize = self.concat(label_using)\n label_weight_with_batchsize = self.concat(label_weight_using)\n\n # stop\n bbox_target_ = F.stop_gradient(bbox_target_with_batchsize)\n bbox_weight_ = F.stop_gradient(bbox_weight_with_batchsize)\n label_ = F.stop_gradient(label_with_batchsize)\n label_weight_ = F.stop_gradient(label_weight_with_batchsize)\n\n cls_score_i = rpn_cls_score[i]\n reg_score_i = rpn_bbox_pred[i]\n\n loss_cls = self.loss_cls(cls_score_i, label_)\n loss_cls_item = loss_cls * label_weight_\n loss_cls_item = self.sum_loss(loss_cls_item, (0,)) / self.num_expected_total\n\n loss_reg = self.loss_bbox(reg_score_i, bbox_target_)\n bbox_weight_ = self.tile(self.reshape(bbox_weight_, (self.feature_anchor_shape[i], 1)), (1, 4))\n loss_reg = loss_reg * bbox_weight_\n loss_reg_item = self.sum_loss(loss_reg, (1,))\n loss_reg_item = self.sum_loss(loss_reg_item, (0,)) / self.num_expected_total\n\n loss_total = self.rpn_loss_cls_weight * loss_cls_item + self.rpn_loss_reg_weight * loss_reg_item\n\n loss += loss_total\n loss_print += (loss_total, loss_cls_item, loss_reg_item)\n clsloss += loss_cls_item\n regloss += loss_reg_item\n\n output = (loss, rpn_cls_score_total, rpn_bbox_pred_total, clsloss, regloss, loss_print)\n else:\n output = (self.placeh1, rpn_cls_score_total, rpn_bbox_pred_total, self.placeh1, self.placeh1, self.placeh1)\n\n return output\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Faithfulness\"\"\"\nimport math\nfrom typing import Callable, Optional, Union, Tuple\n\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\n\nimport mindspore as ms\nimport mindspore.nn as nn\nimport mindspore.ops.operations as op\nfrom .metric import AttributionMetric\nfrom ..._utils import calc_correlation, calc_auc, format_tensor_to_ndarray, 
rank_pixels\nfrom ...explanation._attribution._attribution import Attribution as _Attribution\n\n_Array = np.ndarray\n_Explainer = Union[_Attribution, Callable]\n_Label = Union[int, ms.Tensor]\n_Module = nn.Cell\n\n\ndef _calc_feature_importance(saliency: _Array, masks: _Array) -> _Array:\n \"\"\"Calculate feature important w.r.t given masks.\"\"\"\n feature_importance = []\n num_perturbations = masks.shape[0]\n for i in range(num_perturbations):\n patch_feature_importance = saliency[masks[i]].sum() / masks[i].sum()\n feature_importance.append(patch_feature_importance)\n feature_importance = np.array(feature_importance, dtype=np.float32)\n return feature_importance\n\n\nclass _BaseReplacement:\n \"\"\"\n Base class of generator for generating different replacement for perturbations.\n\n Args:\n kwargs: Optional args for generating replacement. Derived class need to\n add necessary arg names and default value to '_necessary_args'.\n If the argument has no default value, the value should be set to\n 'EMPTY' to mark the required args. Initializing an object will\n check the given kwargs w.r.t '_necessary_args'.\n\n Raise:\n ValueError: Raise when provided kwargs not contain necessary arg names with 'EMPTY' mark.\n \"\"\"\n _necessary_args = {}\n\n def __init__(self, **kwargs):\n self._replace_args = self._necessary_args.copy()\n for key, value in self._replace_args.items():\n if key in kwargs.keys():\n self._replace_args[key] = kwargs[key]\n elif key not in kwargs.keys() and value == 'EMPTY':\n raise ValueError(f\"Missing keyword arg {key} for {self.__class__.__name__}.\")\n\n __call__: Callable\n \"\"\"\n Generate replacement for perturbations. Derived class should overwrite this\n function to generate different replacement for perturbing.\n\n Args:\n inputs (_Array): Array to be perturb.\n\n Returns:\n - replacement (_Array): Array to provide alternative pixels for every\n position in the given\n inputs. The returned array should have same shape as inputs.\n \"\"\"\n\n\nclass Constant(_BaseReplacement):\n \"\"\" Generator to provide constant-value replacement for perturbations \"\"\"\n _necessary_args = {'base_value': 'EMPTY'}\n\n def __call__(self, inputs: _Array) -> _Array:\n replacement = np.ones_like(inputs, dtype=np.float32)\n replacement *= self._replace_args['base_value']\n return replacement\n\n\nclass GaussianBlur(_BaseReplacement):\n \"\"\" Generator to provided gaussian blurred inputs for perturbation. \"\"\"\n _necessary_args = {'sigma': 0.7}\n\n def __call__(self, inputs: _Array) -> _Array:\n sigma = self._replace_args['sigma']\n replacement = gaussian_filter(inputs, sigma=sigma)\n return replacement\n\n\nclass Perturb:\n \"\"\"\n Perturbation generator to generate perturbations for a given array.\n\n Args:\n perturb_percent (float): percentage of pixels to perturb\n perturb_mode (str): specify perturbing mode, through deleting or\n inserting pixels. Current support: ['Deletion', 'Insertion'].\n is_accumulate (bool): whether to accumulate the former perturbations to\n the later perturbations.\n perturb_pixel_per_step (int, optional): number of pixel to perturb\n for each perturbation. If perturb_pixel_per_step is None, actual\n perturb_pixel_per_step will be calculate by:\n num_image_pixel * perturb_percent / num_perturb_steps.\n Default: None\n num_perturbations (int, optional): number of perturbations. 
If\n num_perturbations if None, it will be calculated by:\n num_image_pixel * perturb_percent / perturb_pixel_per_step.\n Default: None\n\n \"\"\"\n\n def __init__(self,\n perturb_percent: float,\n perturb_mode: str,\n is_accumulate: bool,\n perturb_pixel_per_step: Optional[int] = None,\n num_perturbations: Optional[int] = None):\n self._perturb_percent = perturb_percent\n self._perturb_mode = perturb_mode\n self._pixel_per_step = perturb_pixel_per_step\n self._num_perturbations = num_perturbations\n self._is_accumulate = is_accumulate\n\n @staticmethod\n def _assign(x: _Array, y: _Array, masks: _Array):\n \"\"\"Assign values to perturb pixels on perturbations.\"\"\"\n if masks.dtype != bool:\n raise TypeError('The param \"masks\" should be an array of bool, but receive {}'\n .format(masks.dtype))\n for i in range(x.shape[0]):\n x[i][:, masks[i]] = y[:, masks[i]]\n\n def _generate_mask(self, saliency_rank: _Array) -> _Array:\n \"\"\"Generate mask for perturbations based on given saliency ranks.\"\"\"\n if len(saliency_rank.shape) != 2:\n raise ValueError(f'The param \"saliency_rank\" should be 2-dim, but receive {len(saliency_rank.shape)}.')\n\n num_pixels = saliency_rank.shape[0] * saliency_rank.shape[1]\n if self._pixel_per_step:\n pixel_per_step = self._pixel_per_step\n num_perturbations = math.floor(\n num_pixels * self._perturb_percent / self._pixel_per_step)\n elif self._num_perturbations:\n pixel_per_step = math.floor(\n num_pixels * self._perturb_percent / self._num_perturbations)\n num_perturbations = self._num_perturbations\n else:\n raise ValueError(\"Must provide either pixel_per_step or num_perturbations.\")\n\n masks = np.zeros(\n (num_perturbations, saliency_rank.shape[0], saliency_rank.shape[1]),\n dtype=np.bool)\n low_bound = 0\n up_bound = low_bound + pixel_per_step\n factor = 0 if self._is_accumulate else 1\n\n for i in range(num_perturbations):\n masks[i, ((saliency_rank >= low_bound)\n & (saliency_rank < up_bound))] = True\n low_bound = up_bound * factor\n up_bound += pixel_per_step\n\n if len(masks.shape) == 3:\n return masks\n raise ValueError(f'Invalid masks shape {len(masks.shape)}, expect 3-dim.')\n\n def __call__(self,\n inputs: _Array,\n saliency: _Array,\n reference: _Array,\n return_mask: bool = False,\n ) -> Union[_Array, Tuple[_Array, ...]]:\n \"\"\"\n Generate perturbations of given array.\n\n Args:\n inputs (_Array): input array to perturb\n saliency (_Array): saliency map\n return_mask (bool): whether return the mask for generating\n the perturbation. 
The mask can be used to calculate\n average feature importance of pixels perturbed at each step.\n\n Return:\n perturbations (_Array)\n masks (_Array): return when return_mask is set to True.\n \"\"\"\n if not np.array_equal(inputs.shape, reference.shape):\n raise ValueError('reference must have the same shape as inputs.')\n\n saliency_rank = rank_pixels(saliency, descending=True)\n masks = self._generate_mask(saliency_rank)\n num_perturbations = masks.shape[0]\n\n if self._perturb_mode == 'Insertion':\n inputs, reference = reference, inputs\n\n perturbations = np.tile(\n inputs, (num_perturbations, *[1] * len(inputs.shape)))\n\n Perturb._assign(perturbations, reference, masks)\n\n if return_mask:\n return perturbations, masks\n return perturbations\n\n\nclass _FaithfulnessHelper:\n \"\"\"Base class for faithfulness calculator.\"\"\"\n _support = [Constant, GaussianBlur]\n\n def __init__(self,\n perturb_percent: float,\n perturb_mode: str,\n perturb_method: str,\n is_accumulate: bool,\n perturb_pixel_per_step: Optional[int] = None,\n num_perturbations: Optional[int] = None,\n **kwargs):\n\n self._get_reference = None\n for method in self._support:\n if perturb_method == method.__name__:\n self._get_reference = method(**kwargs)\n if self._get_reference is None:\n raise ValueError(\n 'The param \"perturb_method\" should be one of {}.'.format([x.__name__ for x in self._support]))\n\n self._perturb = Perturb(perturb_percent=perturb_percent,\n perturb_mode=perturb_mode,\n perturb_pixel_per_step=perturb_pixel_per_step,\n num_perturbations=num_perturbations,\n is_accumulate=is_accumulate)\n\n calc_faithfulness: Callable\n \"\"\"\n Method used to calculate faithfulness for given inputs, target label,\n saliency. Derive class should implement this method.\n\n Args:\n inputs (_Array): sample to calculate faithfulness score\n model (_Module): model to explanation\n targets (_Label): label to explanation on.\n saliency (_Array): Saliency map of given inputs and targets from the\n explainer.\n\n Return:\n - faithfulness (float): faithfulness score\n \"\"\"\n\n\nclass NaiveFaithfulness(_FaithfulnessHelper):\n \"\"\"\n Calculator for naive faithfulness.\n\n Naive faithfulness, the metric replace several pixels on original image by\n specific method for each perturbations. The metric predicts on the perturbed\n images and record a series of probabilities. Then calculates the\n correlation between prob distribution and averaged feature importance.\n Higher correlation indicates better faithfulness.\n\n Args:\n perturb_percent (float): percentage of pixels to perturb\n perturb_method (str): specify the method to replace the pixel.\n Current support: ['Constant', 'GaussianBlur']\n is_accumulate (bool): whether to accumulate the former perturbations to\n the later perturbations.\n Default: False.\n perturb_pixel_per_step (Optional[int]): number of pixel to perturb\n for each perturbation. If perturb_pixel_per_step is None, actual\n perturb_pixel_per_step will be calculate by:\n num_image_pixel * perturb_percent / num_perturb_steps.\n Default: None\n num_perturbations (Optional[int]): number of perturbations. If\n num_perturbations if None, it will be calculated by:\n num_image_pixel * perturb_percent / perturb_pixel_per_step.\n Default: None\n kwargs: specific perturb_method will require\n different arguments. 
Below lists required args for each method.\n\n 'Constant': base_value (int)\n 'GaussianBlur': sigma (float): 0.7\n\n \"\"\"\n\n def __init__(self,\n perturb_percent: float,\n perturb_method: str,\n is_accumulate: bool = False,\n perturb_pixel_per_step: Optional[int] = None,\n num_perturbations: Optional[int] = None,\n **kwargs):\n super(NaiveFaithfulness, self).__init__(\n perturb_percent=perturb_percent,\n perturb_mode='Deletion',\n perturb_method=perturb_method,\n is_accumulate=is_accumulate,\n perturb_pixel_per_step=perturb_pixel_per_step,\n num_perturbations=num_perturbations,\n **kwargs)\n\n def calc_faithfulness(self,\n inputs: _Array,\n model: _Module,\n targets: _Label,\n saliency: _Array) -> np.ndarray:\n \"\"\"\n Calculate naive faithfulness.\n\n Args:\n inputs (_Array): sample to calculate faithfulness score\n model (_Module): model to explanation\n targets (_Label): label to explanation on.\n saliency (_Array): Saliency map of given inputs and targets from the\n explainer.\n\n Return:\n - faithfulness (np.ndarray): faithfulness score\n\n \"\"\"\n reference = self._get_reference(inputs)\n perturbations, masks = self._perturb(\n inputs, saliency, reference, return_mask=True)\n feature_importance = _calc_feature_importance(saliency, masks)\n\n perturbations = ms.Tensor(perturbations, dtype=ms.float32)\n predictions = model(perturbations).asnumpy()[:, targets]\n\n faithfulness = calc_correlation(feature_importance, predictions)\n normalized_faithfulness = (faithfulness + 1) / 2\n return np.array([normalized_faithfulness], np.float)\n\n\nclass DeletionAUC(_FaithfulnessHelper):\n \"\"\" Calculator for deletion AUC.\n\n For Deletion AUC, the metric accumulative replace pixels on origin\n images through specific 'perturb_method', predict on the perturbed images\n and record series of probabilities. The metric then calculates the AUC of\n the probability variation curve during perturbations. Faithfulness is define\n as (1 - deletion_AUC). Higher score indicates better faithfulness of\n explanation.\n\n Args:\n perturb_percent (float): percentage of pixels to perturb\n perturb_method (str): specify the method to replace the pixel.\n Current support: ['Constant', 'GaussianBlur']\n perturb_pixel_per_step (Optional[int]): number of pixel to perturb\n for each perturbation. If perturb_pixel_per_step is None, actual\n perturb_pixel_per_step will be calculate by:\n num_image_pixel * perturb_percent / num_perturb_steps.\n Default: None\n num_perturbations (Optional[int]): number of perturbations. If\n num_perturbations if None, it will be calculated by:\n num_image_pixel * perterb_percent / perturb_pixel_per_step.\n Default: None\n kwargs: specific perturb_method will require\n different arguments. 
Below lists required args for each method.\n\n 'Constant': base_value (int)\n 'GaussianBlur': sigma (float): 0.7\n\n \"\"\"\n\n def __init__(self,\n perturb_percent: float,\n perturb_method: str,\n perturb_pixel_per_step: Optional[int] = None,\n num_perturbations: Optional[int] = None,\n **kwargs):\n super(DeletionAUC, self).__init__(\n perturb_percent=perturb_percent,\n perturb_mode='Deletion',\n perturb_method=perturb_method,\n perturb_pixel_per_step=perturb_pixel_per_step,\n num_perturbations=num_perturbations,\n is_accumulate=True,\n **kwargs)\n\n def calc_faithfulness(self,\n inputs: _Array,\n model: _Module,\n targets: _Label,\n saliency: _Array) -> np.ndarray:\n \"\"\"\n Calculate faithfulness through deletion AUC.\n\n Args:\n inputs (_Array): sample to calculate faithfulness score\n model (_Module): model to explanation\n targets (_Label): label to explanation on.\n saliency (_Array): Saliency map of given inputs and targets from the\n explainer.\n\n Return:\n - faithfulness (float): faithfulness score\n\n \"\"\"\n reference = self._get_reference(inputs)\n perturbations = self._perturb(inputs, saliency, reference)\n perturbations = ms.Tensor(perturbations, dtype=ms.float32)\n predictions = model(perturbations).asnumpy()[:, targets]\n input_tensor = op.ExpandDims()(ms.Tensor(inputs, ms.float32), 0)\n original_output = model(input_tensor).asnumpy()[:, targets]\n\n auc = calc_auc(original_output - predictions)\n return np.array([1 - auc])\n\n\nclass InsertionAUC(_FaithfulnessHelper):\n \"\"\" Calculator for insertion AUC.\n\n For Insertion AUC, the metric accumulative replace pixels of reference\n image by pixels from origin image, like inserting pixel from origin image to\n reference. The reference if generated through specific 'perturb_method'.\n The metric predicts on the perturbed images and records series of\n probabilities. The metric then calculates the AUC of the probability\n variation curve during perturbations. Faithfulness is define as (1 -\n deletion_AUC). Higher score indicates better faithfulness of explanation.\n\n Args:\n perturb_percent (float): percentage of pixels to perturb\n perturb_method (str): specify the method to replace the pixel.\n Current support: ['Constant', 'GaussianBlur']\n perturb_pixel_per_step (Optional[int]): number of pixel to perturb\n for each perturbation. If perturb_pixel_per_step is None, actual\n perturb_pixel_per_step will be calculate by:\n num_image_pixel * perturb_percent / num_perturb_steps.\n Default: None\n num_perturbations (Optional[int]): number of perturbations. If\n num_perturbations if None, it will be calculated by:\n num_image_pixel * perterb_percent / perturb_pixel_per_step.\n Default: None\n kwargs: specific perturb_method will require\n different arguments. 
Below lists required args for each method.\n\n 'Constant': base_value (int)\n 'GaussianBlur': sigma (float): 0.7\n\n \"\"\"\n\n def __init__(self,\n perturb_percent: float,\n perturb_method: str,\n perturb_pixel_per_step: Optional[int] = None,\n num_perturbations: Optional[int] = None,\n **kwargs):\n super(InsertionAUC, self).__init__(\n perturb_percent=perturb_percent,\n perturb_mode='Insertion',\n perturb_method=perturb_method,\n perturb_pixel_per_step=perturb_pixel_per_step,\n num_perturbations=num_perturbations,\n is_accumulate=True,\n **kwargs)\n\n def calc_faithfulness(self,\n inputs: _Array,\n model: _Module,\n targets: _Label,\n saliency: _Array) -> np.ndarray:\n \"\"\"\n Calculate faithfulness through insertion AUC.\n\n Args:\n inputs (_Array): sample to calculate faithfulness score\n model (_Module): model to explanation\n targets (_Label): label to explanation on.\n saliency (_Array): Saliency map of given inputs and targets from the\n explainer.\n\n Return:\n - faithfulness (float): faithfulness score\n\n \"\"\"\n reference = self._get_reference(inputs)\n perturbations = self._perturb(inputs, saliency, reference)\n perturbations = ms.Tensor(perturbations, dtype=ms.float32)\n predictions = model(perturbations).asnumpy()[:, targets]\n base_tensor = op.ExpandDims()(ms.Tensor(reference, ms.float32), 0)\n base_outputs = model(base_tensor).asnumpy()[:, targets]\n\n auc = calc_auc(predictions - base_outputs)\n return np.array([auc])\n\n\nclass Faithfulness(AttributionMetric):\n \"\"\"\n Provides evaluation on faithfulness on XAI explanations.\n\n Faithfulness first generate saliency map with given explainers and calculate faithfulness based on different\n faithfulness metric.\n\n Args:\n num_labels (int): number of labels\n metric (str): the specifi metric to quantify faithfulness.\n Options: 'DeletionAUC', 'InsertionAUC', 'NaiveFaithfulness'.\n Default: 'NaiveFaithfulness'.\n\n Examples:\n >>> # init a `Faithfulness` object\n >>> num_labels = 10\n >>> metric = \"InsertionAUC\"\n >>> faithfulness = Faithfulness(num_labels, metric)\n \"\"\"\n _methods = [NaiveFaithfulness, DeletionAUC, InsertionAUC]\n\n def __init__(self, num_labels: int, metric: str = \"NaiveFaithfulness\"):\n super(Faithfulness, self).__init__(num_labels)\n\n perturb_percent = 0.5 # ratio of pixels to be perturbed, future argument\n perturb_method = \"Constant\" # perturbation method, all the perturbed pixels will be set to constant\n num_perturb_pixel_per_step = None # number of pixels for each perturbation step\n num_perturb_steps = 100 # separate the perturbation progress in to 100 steps.\n base_value = 0.0 # the pixel value set for the perturbed pixels\n\n self._verify_metrics(metric)\n for method in self._methods:\n if metric == method.__name__:\n self._faithfulness_helper = method(\n perturb_percent=perturb_percent,\n perturb_method=perturb_method,\n perturb_pixel_per_step=num_perturb_pixel_per_step,\n num_perturbations=num_perturb_steps,\n base_value=base_value\n )\n\n def evaluate(self, explainer, inputs, targets, saliency=None):\n \"\"\"\n Evaluate faithfulness on a single data sample.\n\n Args:\n explainer (Explainer): A explainer instance object.\n The 'Explainer' object see mindspore/explainer/explanation.\n inputs (Tensor): data sample. 
Currently only support single sample at each call.\n targets (Union[int, Tensor]): A target label to evaluate on.\n saliency (Tensor): A saliency tensor.\n\n Return:\n np.ndarray: result of faithfulness evaluated on explainer.\n\n Notes:\n To apply `Faithfulness` to evaluate an explainer, this explainer must be initialize with a network that\n contains the output activation function. Otherwise, the results will not be correct.\n\n Examples:\n >>> # init an explainer, the network should contain the output activation function.\n >>> network = nn.SequentialCell([resnet50, nn.Sigmoid()])\n >>> gradient = Gradient(network)\n >>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)\n >>> targets = 5\n >>> # usage 1: input the explainer and the data to be explained,\n >>> # calculate the faithfulness with the specified metric\n >>> res = faithfulness.evaluate(gradient, inputs, targets)\n >>> # usage 2: input the generated saliency map\n >>> saliency = gradient(inputs, targets)\n >>> res = faithfulenss.evaluate(gradient, inputs, targets, saliency)\n \"\"\"\n\n self._check_evaluate_param(explainer, inputs, targets, saliency)\n\n if saliency is None:\n saliency = explainer(inputs, targets)\n\n inputs = format_tensor_to_ndarray(inputs)\n saliency = format_tensor_to_ndarray(saliency)\n\n inputs = inputs.squeeze(axis=0)\n saliency = saliency.squeeze()\n if len(saliency.shape) != 2:\n raise ValueError('Squeezed saliency map is expected to 2D, but receive {}.'.format(len(saliency.shape)))\n\n faithfulness = self._faithfulness_helper.calc_faithfulness(inputs=inputs, model=explainer.model,\n targets=targets, saliency=saliency)\n return faithfulness\n\n def _verify_metrics(self, metric: str):\n supports = [x.__name__ for x in self._methods]\n if metric not in supports:\n raise ValueError(\"Metric should be one of {}.\".format(supports))\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting the CutMixBatch op in DE\n\"\"\"\nimport numpy as np\nimport pytest\nimport mindspore.dataset as ds\nimport mindspore.dataset.vision.c_transforms as vision\nimport mindspore.dataset.transforms.c_transforms as data_trans\nimport mindspore.dataset.vision.utils as mode\nfrom mindspore import log as logger\nfrom util import save_and_check_md5, diff_mse, visualize_list, config_get_set_seed, \\\n config_get_set_num_parallel_workers\n\nDATA_DIR = \"../data/dataset/testCifar10Data\"\nDATA_DIR2 = \"../data/dataset/testImageNetData2/train/\"\nDATA_DIR3 = \"../data/dataset/testCelebAData/\"\n\nGENERATE_GOLDEN = False\n\n\ndef test_cutmix_batch_success1(plot=False):\n \"\"\"\n Test CutMixBatch op with specified alpha and prob parameters on a batch of CHW images\n \"\"\"\n logger.info(\"test_cutmix_batch_success1\")\n # Original Images\n ds_original = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n ds_original = ds_original.batch(5, drop_remainder=True)\n\n 
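A side note on the accumulation pattern used throughout these tests: growing `images_original` and `images_cutmix` with `np.append` inside the loop copies the whole array on every batch. A sketch of the more usual idiom, collecting batches in a list and concatenating once, is shown here; the `collect_images` helper is illustrative only.

# Minimal sketch (illustrative helper, not from the test file).
import numpy as np

def collect_images(dataset):
    # Each iteration yields (image, label); convert once and concatenate at the end.
    batches = [image.asnumpy() for image, _ in dataset]
    return np.concatenate(batches, axis=0) if batches else None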
images_original = None\n for idx, (image, _) in enumerate(ds_original):\n if idx == 0:\n images_original = image.asnumpy()\n else:\n images_original = np.append(images_original, image.asnumpy(), axis=0)\n\n # CutMix Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n hwc2chw_op = vision.HWC2CHW()\n data1 = data1.map(operations=hwc2chw_op, input_columns=[\"image\"])\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NCHW, 2.0, 0.5)\n data1 = data1.batch(5, drop_remainder=True)\n data1 = data1.map(operations=cutmix_batch_op, input_columns=[\"image\", \"label\"])\n\n images_cutmix = None\n for idx, (image, _) in enumerate(data1):\n if idx == 0:\n images_cutmix = image.asnumpy().transpose(0, 2, 3, 1)\n else:\n images_cutmix = np.append(images_cutmix, image.asnumpy().transpose(0, 2, 3, 1), axis=0)\n if plot:\n visualize_list(images_original, images_cutmix)\n\n num_samples = images_original.shape[0]\n mse = np.zeros(num_samples)\n for i in range(num_samples):\n mse[i] = diff_mse(images_cutmix[i], images_original[i])\n logger.info(\"MSE= {}\".format(str(np.mean(mse))))\n\n\ndef test_cutmix_batch_success2(plot=False):\n \"\"\"\n Test CutMixBatch op with default values for alpha and prob on a batch of rescaled HWC images\n \"\"\"\n logger.info(\"test_cutmix_batch_success2\")\n\n # Original Images\n ds_original = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n ds_original = ds_original.batch(5, drop_remainder=True)\n\n images_original = None\n for idx, (image, _) in enumerate(ds_original):\n if idx == 0:\n images_original = image.asnumpy()\n else:\n images_original = np.append(images_original, image.asnumpy(), axis=0)\n\n # CutMix Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n rescale_op = vision.Rescale((1.0 / 255.0), 0.0)\n data1 = data1.map(operations=rescale_op, input_columns=[\"image\"])\n cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NHWC)\n data1 = data1.batch(5, drop_remainder=True)\n data1 = data1.map(operations=cutmix_batch_op, input_columns=[\"image\", \"label\"])\n\n images_cutmix = None\n for idx, (image, _) in enumerate(data1):\n if idx == 0:\n images_cutmix = image.asnumpy()\n else:\n images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0)\n if plot:\n visualize_list(images_original, images_cutmix)\n\n num_samples = images_original.shape[0]\n mse = np.zeros(num_samples)\n for i in range(num_samples):\n mse[i] = diff_mse(images_cutmix[i], images_original[i])\n logger.info(\"MSE= {}\".format(str(np.mean(mse))))\n\n\ndef test_cutmix_batch_success3(plot=False):\n \"\"\"\n Test CutMixBatch op with default values for alpha and prob on a batch of HWC images on ImageFolderDataset\n \"\"\"\n logger.info(\"test_cutmix_batch_success3\")\n\n ds_original = ds.ImageFolderDataset(dataset_dir=DATA_DIR2, shuffle=False)\n decode_op = vision.Decode()\n ds_original = ds_original.map(operations=[decode_op], input_columns=[\"image\"])\n resize_op = vision.Resize([224, 224])\n ds_original = ds_original.map(operations=[resize_op], input_columns=[\"image\"])\n ds_original = ds_original.batch(4, pad_info={}, drop_remainder=True)\n\n images_original = None\n for idx, (image, _) in enumerate(ds_original):\n if idx == 0:\n images_original = image.asnumpy()\n else:\n 
images_original = np.append(images_original, image.asnumpy(), axis=0)\n\n # CutMix Images\n data1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR2, shuffle=False)\n\n decode_op = vision.Decode()\n data1 = data1.map(operations=[decode_op], input_columns=[\"image\"])\n\n resize_op = vision.Resize([224, 224])\n data1 = data1.map(operations=[resize_op], input_columns=[\"image\"])\n\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n\n cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NHWC)\n data1 = data1.batch(4, pad_info={}, drop_remainder=True)\n data1 = data1.map(operations=cutmix_batch_op, input_columns=[\"image\", \"label\"])\n\n images_cutmix = None\n for idx, (image, _) in enumerate(data1):\n if idx == 0:\n images_cutmix = image.asnumpy()\n else:\n images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0)\n if plot:\n visualize_list(images_original, images_cutmix)\n\n num_samples = images_original.shape[0]\n mse = np.zeros(num_samples)\n for i in range(num_samples):\n mse[i] = diff_mse(images_cutmix[i], images_original[i])\n logger.info(\"MSE= {}\".format(str(np.mean(mse))))\n\n\ndef test_cutmix_batch_success4(plot=False):\n \"\"\"\n Test CutMixBatch on a dataset where OneHot returns a 2D vector\n \"\"\"\n logger.info(\"test_cutmix_batch_success4\")\n\n ds_original = ds.CelebADataset(DATA_DIR3, shuffle=False)\n decode_op = vision.Decode()\n ds_original = ds_original.map(operations=[decode_op], input_columns=[\"image\"])\n resize_op = vision.Resize([224, 224])\n ds_original = ds_original.map(operations=[resize_op], input_columns=[\"image\"])\n ds_original = ds_original.batch(2, drop_remainder=True)\n\n images_original = None\n for idx, (image, _) in enumerate(ds_original):\n if idx == 0:\n images_original = image.asnumpy()\n else:\n images_original = np.append(images_original, image.asnumpy(), axis=0)\n\n # CutMix Images\n data1 = ds.CelebADataset(dataset_dir=DATA_DIR3, shuffle=False)\n\n decode_op = vision.Decode()\n data1 = data1.map(operations=[decode_op], input_columns=[\"image\"])\n\n resize_op = vision.Resize([224, 224])\n data1 = data1.map(operations=[resize_op], input_columns=[\"image\"])\n\n one_hot_op = data_trans.OneHot(num_classes=100)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"attr\"])\n\n cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NHWC, 0.5, 0.9)\n data1 = data1.batch(2, drop_remainder=True)\n data1 = data1.map(operations=cutmix_batch_op, input_columns=[\"image\", \"attr\"])\n\n images_cutmix = None\n for idx, (image, _) in enumerate(data1):\n if idx == 0:\n images_cutmix = image.asnumpy()\n else:\n images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0)\n if plot:\n visualize_list(images_original, images_cutmix)\n\n num_samples = images_original.shape[0]\n mse = np.zeros(num_samples)\n for i in range(num_samples):\n mse[i] = diff_mse(images_cutmix[i], images_original[i])\n logger.info(\"MSE= {}\".format(str(np.mean(mse))))\n\n\ndef test_cutmix_batch_nhwc_md5():\n \"\"\"\n Test CutMixBatch on a batch of HWC images with MD5:\n \"\"\"\n logger.info(\"test_cutmix_batch_nhwc_md5\")\n original_seed = config_get_set_seed(0)\n original_num_parallel_workers = config_get_set_num_parallel_workers(1)\n\n # CutMixBatch Images\n data = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n\n one_hot_op = data_trans.OneHot(num_classes=10)\n data = data.map(operations=one_hot_op, input_columns=[\"label\"])\n cutmix_batch_op = 
vision.CutMixBatch(mode.ImageBatchFormat.NHWC)\n data = data.batch(5, drop_remainder=True)\n data = data.map(operations=cutmix_batch_op, input_columns=[\"image\", \"label\"])\n\n filename = \"cutmix_batch_c_nhwc_result.npz\"\n save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)\n\n # Restore config setting\n ds.config.set_seed(original_seed)\n ds.config.set_num_parallel_workers(original_num_parallel_workers)\n\n\ndef test_cutmix_batch_nchw_md5():\n \"\"\"\n Test CutMixBatch on a batch of CHW images with MD5:\n \"\"\"\n logger.info(\"test_cutmix_batch_nchw_md5\")\n original_seed = config_get_set_seed(0)\n original_num_parallel_workers = config_get_set_num_parallel_workers(1)\n\n # CutMixBatch Images\n data = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n hwc2chw_op = vision.HWC2CHW()\n data = data.map(operations=hwc2chw_op, input_columns=[\"image\"])\n one_hot_op = data_trans.OneHot(num_classes=10)\n data = data.map(operations=one_hot_op, input_columns=[\"label\"])\n cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NCHW)\n data = data.batch(5, drop_remainder=True)\n data = data.map(operations=cutmix_batch_op, input_columns=[\"image\", \"label\"])\n\n filename = \"cutmix_batch_c_nchw_result.npz\"\n save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)\n\n # Restore config setting\n ds.config.set_seed(original_seed)\n ds.config.set_num_parallel_workers(original_num_parallel_workers)\n\n\ndef test_cutmix_batch_fail1():\n \"\"\"\n Test CutMixBatch Fail 1\n We expect this to fail because the images and labels are not batched\n \"\"\"\n logger.info(\"test_cutmix_batch_fail1\")\n\n # CutMixBatch Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NHWC)\n with pytest.raises(RuntimeError) as error:\n data1 = data1.map(operations=cutmix_batch_op, input_columns=[\"image\", \"label\"])\n for idx, (image, _) in enumerate(data1):\n if idx == 0:\n images_cutmix = image.asnumpy()\n else:\n images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0)\n error_message = \"You must make sure images are HWC or CHW and batch \"\n assert error_message in str(error.value)\n\n\ndef test_cutmix_batch_fail2():\n \"\"\"\n Test CutMixBatch Fail 2\n We expect this to fail because alpha is negative\n \"\"\"\n logger.info(\"test_cutmix_batch_fail2\")\n\n # CutMixBatch Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n with pytest.raises(ValueError) as error:\n vision.CutMixBatch(mode.ImageBatchFormat.NHWC, -1)\n error_message = \"Input is not within the required interval\"\n assert error_message in str(error.value)\n\n\ndef test_cutmix_batch_fail3():\n \"\"\"\n Test CutMixBatch Fail 2\n We expect this to fail because prob is larger than 1\n \"\"\"\n logger.info(\"test_cutmix_batch_fail3\")\n\n # CutMixBatch Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n with pytest.raises(ValueError) as error:\n vision.CutMixBatch(mode.ImageBatchFormat.NHWC, 1, 2)\n error_message = \"Input is not within the required interval\"\n assert error_message in str(error.value)\n\n\ndef 
test_cutmix_batch_fail4():\n \"\"\"\n Test CutMixBatch Fail 2\n We expect this to fail because prob is negative\n \"\"\"\n logger.info(\"test_cutmix_batch_fail4\")\n\n # CutMixBatch Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n with pytest.raises(ValueError) as error:\n vision.CutMixBatch(mode.ImageBatchFormat.NHWC, 1, -1)\n error_message = \"Input is not within the required interval\"\n assert error_message in str(error.value)\n\n\ndef test_cutmix_batch_fail5():\n \"\"\"\n Test CutMixBatch op\n We expect this to fail because label column is not passed to cutmix_batch\n \"\"\"\n logger.info(\"test_cutmix_batch_fail5\")\n\n # CutMixBatch Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NHWC)\n data1 = data1.batch(5, drop_remainder=True)\n data1 = data1.map(operations=cutmix_batch_op, input_columns=[\"image\"])\n\n with pytest.raises(RuntimeError) as error:\n images_cutmix = np.array([])\n for idx, (image, _) in enumerate(data1):\n if idx == 0:\n images_cutmix = image.asnumpy()\n else:\n images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0)\n error_message = \"Both images and labels columns are required\"\n assert error_message in str(error.value)\n\n\ndef test_cutmix_batch_fail6():\n \"\"\"\n Test CutMixBatch op\n We expect this to fail because image_batch_format passed to CutMixBatch doesn't match the format of the images\n \"\"\"\n logger.info(\"test_cutmix_batch_fail6\")\n\n # CutMixBatch Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NCHW)\n data1 = data1.batch(5, drop_remainder=True)\n data1 = data1.map(operations=cutmix_batch_op, input_columns=[\"image\", \"label\"])\n\n with pytest.raises(RuntimeError) as error:\n images_cutmix = np.array([])\n for idx, (image, _) in enumerate(data1):\n if idx == 0:\n images_cutmix = image.asnumpy()\n else:\n images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0)\n error_message = \"CutMixBatch: Image doesn't match the given image format.\"\n assert error_message in str(error.value)\n\n\ndef test_cutmix_batch_fail7():\n \"\"\"\n Test CutMixBatch op\n We expect this to fail because labels are not in one-hot format\n \"\"\"\n logger.info(\"test_cutmix_batch_fail7\")\n\n # CutMixBatch Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n\n cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NHWC)\n data1 = data1.batch(5, drop_remainder=True)\n data1 = data1.map(operations=cutmix_batch_op, input_columns=[\"image\", \"label\"])\n\n with pytest.raises(RuntimeError) as error:\n images_cutmix = np.array([])\n for idx, (image, _) in enumerate(data1):\n if idx == 0:\n images_cutmix = image.asnumpy()\n else:\n images_cutmix = np.append(images_cutmix, image.asnumpy(), axis=0)\n error_message = \"CutMixBatch: Wrong labels shape. 
The second column (labels) must have a shape of NC or NLC\"\n assert error_message in str(error.value)\n\n\ndef test_cutmix_batch_fail8():\n \"\"\"\n Test CutMixBatch Fail 8\n We expect this to fail because alpha is zero\n \"\"\"\n logger.info(\"test_cutmix_batch_fail8\")\n\n # CutMixBatch Images\n data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False)\n\n one_hot_op = data_trans.OneHot(num_classes=10)\n data1 = data1.map(operations=one_hot_op, input_columns=[\"label\"])\n with pytest.raises(ValueError) as error:\n vision.CutMixBatch(mode.ImageBatchFormat.NHWC, 0.0)\n error_message = \"Input is not within the required interval\"\n assert error_message in str(error.value)\n\n\nif __name__ == \"__main__\":\n test_cutmix_batch_success1(plot=True)\n test_cutmix_batch_success2(plot=True)\n test_cutmix_batch_success3(plot=True)\n test_cutmix_batch_success4(plot=True)\n test_cutmix_batch_nchw_md5()\n test_cutmix_batch_nhwc_md5()\n test_cutmix_batch_fail1()\n test_cutmix_batch_fail2()\n test_cutmix_batch_fail3()\n test_cutmix_batch_fail4()\n test_cutmix_batch_fail5()\n test_cutmix_batch_fail6()\n test_cutmix_batch_fail7()\n test_cutmix_batch_fail8()\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MobileNetV2 model define\"\"\"\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore.ops import operations as P\nfrom mindspore.ops.operations import TensorAdd\nfrom mindspore import Tensor\n\n__all__ = ['MobileNetV2', 'MobileNetV2Backbone', 'MobileNetV2Head', 'mobilenet_v2']\n\n\ndef _make_divisible(v, divisor, min_value=None):\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\nclass GlobalAvgPooling(nn.Cell):\n \"\"\"\n Global avg pooling definition.\n\n Args:\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> GlobalAvgPooling()\n \"\"\"\n\n def __init__(self):\n super(GlobalAvgPooling, self).__init__()\n self.mean = P.ReduceMean(keep_dims=False)\n\n def construct(self, x):\n x = self.mean(x, (2, 3))\n return x\n\n\nclass ConvBNReLU(nn.Cell):\n \"\"\"\n Convolution/Depthwise fused with Batchnorm and ReLU block definition.\n\n Args:\n in_planes (int): Input channel.\n out_planes (int): Output channel.\n kernel_size (int): Input kernel size.\n stride (int): Stride size for the first convolutional layer. Default: 1.\n groups (int): channel group. Convolution is 1 while Depthiwse is input channel. 
Default: 1.\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> ConvBNReLU(16, 256, kernel_size=1, stride=1, groups=1)\n \"\"\"\n\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):\n super(ConvBNReLU, self).__init__()\n padding = (kernel_size - 1) // 2\n in_channels = in_planes\n out_channels = out_planes\n if groups == 1:\n conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, pad_mode='pad', padding=padding)\n else:\n out_channels = in_planes\n conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, pad_mode='pad',\n padding=padding, group=in_channels)\n\n layers = [conv, nn.BatchNorm2d(out_planes), nn.ReLU6()]\n self.features = nn.SequentialCell(layers)\n\n def construct(self, x):\n output = self.features(x)\n return output\n\n\nclass InvertedResidual(nn.Cell):\n \"\"\"\n Mobilenetv2 residual block definition.\n\n Args:\n inp (int): Input channel.\n oup (int): Output channel.\n stride (int): Stride size for the first convolutional layer. Default: 1.\n expand_ratio (int): expand ration of input channel\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> ResidualBlock(3, 256, 1, 1)\n \"\"\"\n\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n assert stride in [1, 2]\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = stride == 1 and inp == oup\n\n layers = []\n if expand_ratio != 1:\n layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))\n layers.extend([\n # dw\n ConvBNReLU(hidden_dim, hidden_dim,\n stride=stride, groups=hidden_dim),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, kernel_size=1,\n stride=1, has_bias=False),\n nn.BatchNorm2d(oup),\n ])\n self.conv = nn.SequentialCell(layers)\n self.add = TensorAdd()\n self.cast = P.Cast()\n\n def construct(self, x):\n identity = x\n x = self.conv(x)\n if self.use_res_connect:\n return self.add(identity, x)\n return x\n\n\nclass MobileNetV2Backbone(nn.Cell):\n \"\"\"\n MobileNetV2 architecture.\n\n Args:\n class_num (int): number of classes.\n width_mult (int): Channels multiplier for round to 8/16 and others. Default is 1.\n has_dropout (bool): Is dropout used. Default is false\n inverted_residual_setting (list): Inverted residual settings. Default is None\n round_nearest (list): Channel round to . 
Default is 8\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> MobileNetV2(num_classes=1000)\n \"\"\"\n\n def __init__(self, width_mult=1., inverted_residual_setting=None, round_nearest=8,\n input_channel=32, last_channel=1280):\n super(MobileNetV2Backbone, self).__init__()\n block = InvertedResidual\n # setting of inverted residual blocks\n self.cfgs = inverted_residual_setting\n if inverted_residual_setting is None:\n self.cfgs = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n ]\n\n # building first layer\n input_channel = _make_divisible(input_channel * width_mult, round_nearest)\n self.out_channels = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)\n features = [ConvBNReLU(3, input_channel, stride=2)]\n # building inverted residual blocks\n for t, c, n, s in self.cfgs:\n output_channel = _make_divisible(c * width_mult, round_nearest)\n for i in range(n):\n stride = s if i == 0 else 1\n features.append(block(input_channel, output_channel, stride, expand_ratio=t))\n input_channel = output_channel\n # building last several layers\n features.append(ConvBNReLU(input_channel, self.out_channels, kernel_size=1))\n # make it nn.CellList\n self.features = nn.SequentialCell(features)\n self._initialize_weights()\n\n def construct(self, x):\n x = self.features(x)\n return x\n\n def _initialize_weights(self):\n \"\"\"\n Initialize weights.\n\n Args:\n\n Returns:\n None.\n\n Examples:\n >>> _initialize_weights()\n \"\"\"\n self.init_parameters_data()\n for _, m in self.cells_and_names():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.set_data(Tensor(np.random.normal(0, np.sqrt(2. / n),\n m.weight.data.shape).astype(\"float32\")))\n if m.bias is not None:\n m.bias.set_data(\n Tensor(np.zeros(m.bias.data.shape, dtype=\"float32\")))\n elif isinstance(m, nn.BatchNorm2d):\n m.gamma.set_data(\n Tensor(np.ones(m.gamma.data.shape, dtype=\"float32\")))\n m.beta.set_data(\n Tensor(np.zeros(m.beta.data.shape, dtype=\"float32\")))\n\n @property\n def get_features(self):\n return self.features\n\n\nclass MobileNetV2Head(nn.Cell):\n \"\"\"\n MobileNetV2 architecture.\n\n Args:\n class_num (int): Number of classes. Default is 1000.\n has_dropout (bool): Is dropout used. 
Default is false\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> MobileNetV2(num_classes=1000)\n \"\"\"\n\n def __init__(self, input_channel=1280, num_classes=1000, has_dropout=False, activation=\"None\"):\n super(MobileNetV2Head, self).__init__()\n # mobilenet head\n head = ([GlobalAvgPooling(), nn.Dense(input_channel, num_classes, has_bias=True)] if not has_dropout else\n [GlobalAvgPooling(), nn.Dropout(0.2), nn.Dense(input_channel, num_classes, has_bias=True)])\n self.head = nn.SequentialCell(head)\n self.need_activation = True\n if activation == \"Sigmoid\":\n self.activation = P.Sigmoid()\n elif activation == \"Softmax\":\n self.activation = P.Softmax()\n else:\n self.need_activation = False\n self._initialize_weights()\n\n def construct(self, x):\n x = self.head(x)\n if self.need_activation:\n x = self.activation(x)\n return x\n\n def _initialize_weights(self):\n \"\"\"\n Initialize weights.\n\n Args:\n\n Returns:\n None.\n\n Examples:\n >>> _initialize_weights()\n \"\"\"\n self.init_parameters_data()\n for _, m in self.cells_and_names():\n if isinstance(m, nn.Dense):\n m.weight.set_data(Tensor(np.random.normal(\n 0, 0.01, m.weight.data.shape).astype(\"float32\")))\n if m.bias is not None:\n m.bias.set_data(\n Tensor(np.zeros(m.bias.data.shape, dtype=\"float32\")))\n @property\n def get_head(self):\n return self.head\n\n\nclass MobileNetV2(nn.Cell):\n \"\"\"\n MobileNetV2 architecture.\n\n Args:\n class_num (int): number of classes.\n width_mult (int): Channels multiplier for round to 8/16 and others. Default is 1.\n has_dropout (bool): Is dropout used. Default is false\n inverted_residual_setting (list): Inverted residual settings. Default is None\n round_nearest (list): Channel round to . Default is 8\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> MobileNetV2(backbone, head)\n \"\"\"\n\n def __init__(self, num_classes=1000, width_mult=1., has_dropout=False, inverted_residual_setting=None, \\\n round_nearest=8, input_channel=32, last_channel=1280):\n super(MobileNetV2, self).__init__()\n self.backbone = MobileNetV2Backbone(width_mult=width_mult, \\\n inverted_residual_setting=inverted_residual_setting, \\\n round_nearest=round_nearest, input_channel=input_channel, last_channel=last_channel).get_features\n self.head = MobileNetV2Head(input_channel=self.backbone.out_channel, num_classes=num_classes, \\\n has_dropout=has_dropout).get_head\n\n def construct(self, x):\n x = self.backbone(x)\n x = self.head(x)\n return x\n\n\nclass MobileNetV2Combine(nn.Cell):\n \"\"\"\n MobileNetV2Combine architecture.\n\n Args:\n backbone (Cell): the features extract layers.\n head (Cell): the fully connected layers.\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> MobileNetV2(num_classes=1000)\n \"\"\"\n\n def __init__(self, backbone, head):\n super(MobileNetV2Combine, self).__init__(auto_prefix=False)\n self.backbone = backbone\n self.head = head\n\n def construct(self, x):\n x = self.backbone(x)\n x = self.head(x)\n return x\n\n\ndef mobilenet_v2(backbone, head):\n return MobileNetV2Combine(backbone, head)\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 
either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nCifar100 convert tool for MindRecord.\n\"\"\"\n\nfrom importlib import import_module\nimport os\nimport numpy as np\n\nfrom mindspore import log as logger\nfrom .cifar100 import Cifar100\nfrom ..common.exceptions import PathNotExistsError\nfrom ..filewriter import FileWriter\nfrom ..shardutils import check_filename, ExceptionThread, SUCCESS\n\ntry:\n cv2 = import_module(\"cv2\")\nexcept ModuleNotFoundError:\n cv2 = None\n\n__all__ = ['Cifar100ToMR']\n\nclass Cifar100ToMR:\n \"\"\"\n A class to transform from cifar100 to MindRecord.\n\n Args:\n source (str): the cifar100 directory to be transformed.\n destination (str): the MindRecord file path to transform into.\n\n Raises:\n ValueError: If source or destination is invalid.\n \"\"\"\n def __init__(self, source, destination):\n check_filename(source)\n self.source = source\n\n files = os.listdir(self.source)\n train_data_flag = False\n test_data_flag = False\n for file in files:\n if file == \"train\":\n train_data_flag = True\n if file == \"test\":\n test_data_flag = True\n if not train_data_flag:\n raise PathNotExistsError(\"train\")\n\n if not test_data_flag:\n raise PathNotExistsError(\"test\")\n\n check_filename(destination)\n self.destination = destination\n self.writer = None\n\n def run(self, fields=None):\n \"\"\"\n Executes transformation from cifar100 to MindRecord.\n\n Args:\n fields (list[str]): A list of index field, e.g.[\"fine_label\", \"coarse_label\"].\n\n Returns:\n SUCCESS or FAILED, whether cifar100 is successfully transformed to MindRecord.\n \"\"\"\n if fields and not isinstance(fields, list):\n raise ValueError(\"The parameter fields should be None or list\")\n\n cifar100_data = Cifar100(self.source, False)\n cifar100_data.load_data()\n\n images = cifar100_data.images\n logger.info(\"train images: {}\".format(images.shape))\n fine_labels = cifar100_data.fine_labels\n logger.info(\"train images fine label: {}\".format(fine_labels.shape))\n coarse_labels = cifar100_data.coarse_labels\n logger.info(\"train images coarse label: {}\".format(coarse_labels.shape))\n\n test_images = cifar100_data.Test.images\n logger.info(\"test images: {}\".format(test_images.shape))\n test_fine_labels = cifar100_data.Test.fine_labels\n logger.info(\"test images fine label: {}\".format(fine_labels.shape))\n test_coarse_labels = cifar100_data.Test.coarse_labels\n logger.info(\"test images coarse label: {}\".format(coarse_labels.shape))\n\n data_list = _construct_raw_data(images, fine_labels, coarse_labels)\n test_data_list = _construct_raw_data(test_images, test_fine_labels, test_coarse_labels)\n\n if _generate_mindrecord(self.destination, data_list, fields, \"img_train\") != SUCCESS:\n return FAILED\n if _generate_mindrecord(self.destination + \"_test\", test_data_list, fields, \"img_test\") != SUCCESS:\n return FAILED\n return SUCCESS\n\n def transform(self, fields=None):\n t = ExceptionThread(target=self.run, kwargs={'fields': fields})\n t.daemon = True\n t.start()\n t.join()\n if t.exitcode != 0:\n raise t.exception\n return t.res\n\ndef _construct_raw_data(images, fine_labels, coarse_labels):\n \"\"\"\n Construct raw data from cifar100 data.\n\n Args:\n images (list): image list from cifar100.\n fine_labels (list): fine label list from cifar100.\n coarse_labels (list): coarse label list from cifar100.\n\n Returns:\n 
SUCCESS/FAILED, whether successfully written into MindRecord.\n \"\"\"\n if not cv2:\n raise ModuleNotFoundError(\"opencv-python module not found, please use pip install it.\")\n\n raw_data = []\n for i, img in enumerate(images):\n fine_label = np.int(fine_labels[i][0])\n coarse_label = np.int(coarse_labels[i][0])\n _, img = cv2.imencode(\".jpeg\", img[..., [2, 1, 0]])\n row_data = {\"id\": int(i),\n \"data\": img.tobytes(),\n \"fine_label\": int(fine_label),\n \"coarse_label\": int(coarse_label)}\n raw_data.append(row_data)\n return raw_data\n\ndef _generate_mindrecord(file_name, raw_data, fields, schema_desc):\n \"\"\"\n Generate MindRecord file from raw data.\n\n Args:\n file_name (str): File name of MindRecord File.\n fields (list[str]): Fields would be set as index which\n could not belong to blob fields and type could not be 'array' or 'bytes'.\n raw_data (dict): Dict of raw data.\n schema_desc (str): String of schema description.\n\n Returns:\n SUCCESS/FAILED, whether successfully written into MindRecord.\n \"\"\"\n schema = {\"id\": {\"type\": \"int64\"}, \"fine_label\": {\"type\": \"int64\"},\n \"coarse_label\": {\"type\": \"int64\"}, \"data\": {\"type\": \"bytes\"}}\n\n logger.info(\"transformed MindRecord schema is: {}\".format(schema))\n\n writer = FileWriter(file_name, 1)\n writer.add_schema(schema, schema_desc)\n if fields and isinstance(fields, list):\n writer.add_index(fields)\n writer.write_raw_data(raw_data)\n return writer.commit()\n", "# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"wide and deep model\"\"\"\r\nimport numpy as np\r\n\r\nimport mindspore.common.dtype as mstype\r\nfrom mindspore import nn, context\r\nfrom mindspore import Tensor, Parameter, ParameterTuple\r\nfrom mindspore.ops import functional as F\r\nfrom mindspore.ops import composite as C\r\nfrom mindspore.ops import operations as P\r\nfrom mindspore.nn import Dropout, Flatten\r\nfrom mindspore.nn.optim import Adam, FTRL\r\nfrom mindspore.common.initializer import Uniform, initializer\r\nfrom mindspore.context import ParallelMode\r\nfrom mindspore.nn.wrap.grad_reducer import DistributedGradReducer\r\n\r\n\r\nnp_type = np.float32\r\nms_type = mstype.float32\r\n\r\n\r\ndef init_method(method, shape, name, max_val=1.0):\r\n \"\"\"\r\n Init method\r\n \"\"\"\r\n if method in ['uniform']:\r\n params = Parameter(initializer(Uniform(max_val), shape, ms_type),\r\n name=name)\r\n elif method == \"one\":\r\n params = Parameter(initializer(\"ones\", shape, ms_type), name=name)\r\n elif method == 'zero':\r\n params = Parameter(initializer(\"zeros\", shape, ms_type), name=name)\r\n elif method == \"normal\":\r\n params = Parameter(Tensor(\r\n np.random.normal(loc=0.0, scale=0.01,\r\n size=shape).astype(dtype=np_type)),\r\n name=name)\r\n return params\r\n\r\n\r\ndef init_var_dict(init_args, in_vars):\r\n \"\"\"\r\n Init parameters by dict\r\n \"\"\"\r\n 
var_map = {}\r\n _, _max_val = init_args\r\n for _, iterm in enumerate(in_vars):\r\n key, shape, method = iterm\r\n if key not in var_map.keys():\r\n if method in ['random', 'uniform']:\r\n var_map[key] = Parameter(initializer(Uniform(_max_val), shape,\r\n ms_type),\r\n name=key)\r\n elif method == \"one\":\r\n var_map[key] = Parameter(initializer(\"ones\", shape, ms_type),\r\n name=key)\r\n elif method == \"zero\":\r\n var_map[key] = Parameter(initializer(\"zeros\", shape, ms_type),\r\n name=key)\r\n elif method == 'normal':\r\n var_map[key] = Parameter(Tensor(\r\n np.random.normal(loc=0.0, scale=0.01,\r\n size=shape).astype(dtype=np_type)),\r\n name=key)\r\n return var_map\r\n\r\n\r\nclass DenseLayer(nn.Cell):\r\n \"\"\"\r\n Dense Layer for Deep Layer of WideDeep Model;\r\n Containing: activation, matmul, bias_add;\r\n \"\"\"\r\n def __init__(self,\r\n input_dim,\r\n output_dim,\r\n weight_bias_init,\r\n act_str,\r\n keep_prob=0.8,\r\n scale_coef=1.0,\r\n use_activation=True,\r\n convert_dtype=True,\r\n drop_out=False):\r\n super(DenseLayer, self).__init__()\r\n weight_init, bias_init = weight_bias_init\r\n self.weight = init_method(weight_init, [input_dim, output_dim],\r\n name=\"weight\")\r\n self.bias = init_method(bias_init, [output_dim], name=\"bias\")\r\n self.act_func = self._init_activation(act_str)\r\n self.matmul = P.MatMul(transpose_b=False)\r\n self.bias_add = P.BiasAdd()\r\n self.cast = P.Cast()\r\n self.dropout = Dropout(keep_prob=keep_prob)\r\n self.mul = P.Mul()\r\n self.realDiv = P.RealDiv()\r\n self.scale_coef = scale_coef\r\n self.use_activation = use_activation\r\n self.convert_dtype = convert_dtype\r\n self.drop_out = drop_out\r\n\r\n def _init_activation(self, act_str):\r\n act_str = act_str.lower()\r\n if act_str == \"relu\":\r\n act_func = P.ReLU()\r\n elif act_str == \"sigmoid\":\r\n act_func = P.Sigmoid()\r\n elif act_str == \"tanh\":\r\n act_func = P.Tanh()\r\n return act_func\r\n\r\n def construct(self, x):\r\n '''\r\n Construct Dense layer\r\n '''\r\n if self.training and self.drop_out:\r\n x = self.dropout(x)\r\n if self.convert_dtype:\r\n x = self.cast(x, mstype.float16)\r\n weight = self.cast(self.weight, mstype.float16)\r\n bias = self.cast(self.bias, mstype.float16)\r\n wx = self.matmul(x, weight)\r\n wx = self.bias_add(wx, bias)\r\n if self.use_activation:\r\n wx = self.act_func(wx)\r\n wx = self.cast(wx, mstype.float32)\r\n else:\r\n wx = self.matmul(x, self.weight)\r\n wx = self.bias_add(wx, self.bias)\r\n if self.use_activation:\r\n wx = self.act_func(wx)\r\n return wx\r\n\r\n\r\nclass WideDeepModel(nn.Cell):\r\n \"\"\"\r\n From paper: \" Wide & Deep Learning for Recommender Systems\"\r\n Args:\r\n config (Class): The default config of Wide&Deep\r\n \"\"\"\r\n def __init__(self, config):\r\n super(WideDeepModel, self).__init__()\r\n emb_128_size = 650000\r\n emb64_single_size = 17300\r\n emb64_multi_size = 20900\r\n indicator_size = 16\r\n deep_dim_list = [1024, 1024, 1024, 1024, 1024]\r\n\r\n wide_reg_coef = [0.0, 0.0]\r\n deep_reg_coef = [0.0, 0.0]\r\n wide_lr = 0.2\r\n deep_lr = 1.0\r\n\r\n self.input_emb_dim = config.input_emb_dim\r\n self.batch_size = config.batch_size\r\n self.deep_layer_act = config.deep_layers_act\r\n self.init_args = config.init_args\r\n self.weight_init, self.bias_init = config.weight_bias_init\r\n self.weight_bias_init = config.weight_bias_init\r\n self.emb_init = config.emb_init\r\n\r\n self.keep_prob = config.keep_prob\r\n self.layer_dims = deep_dim_list + [1]\r\n self.all_dim_list = [self.input_emb_dim] + 
self.layer_dims\r\n\r\n self.continue_field_size = 32\r\n self.emb_128_size = emb_128_size\r\n self.emb64_single_size = emb64_single_size\r\n self.emb64_multi_size = emb64_multi_size\r\n self.indicator_size = indicator_size\r\n\r\n self.wide_l1_coef, self.wide_l2_coef = wide_reg_coef\r\n self.deep_l1_coef, self.deep_l2_coef = deep_reg_coef\r\n self.wide_lr = wide_lr\r\n self.deep_lr = deep_lr\r\n\r\n init_acts_embedding_metrix = [\r\n ('emb128_embedding', [self.emb_128_size, 128], self.emb_init),\r\n ('emb64_single', [self.emb64_single_size, 64], self.emb_init),\r\n ('emb64_multi', [self.emb64_multi_size, 64], self.emb_init),\r\n ('emb64_indicator', [self.indicator_size, 64], self.emb_init)\r\n ]\r\n var_map = init_var_dict(self.init_args, init_acts_embedding_metrix)\r\n self.emb128_embedding = var_map[\"emb128_embedding\"]\r\n self.emb64_single = var_map[\"emb64_single\"]\r\n self.emb64_multi = var_map[\"emb64_multi\"]\r\n self.emb64_indicator = var_map[\"emb64_indicator\"]\r\n\r\n init_acts_wide_weight = [\r\n ('wide_continue_w', [self.continue_field_size], self.emb_init),\r\n ('wide_emb128_w', [self.emb_128_size], self.emb_init),\r\n ('wide_emb64_single_w', [self.emb64_single_size], self.emb_init),\r\n ('wide_emb64_multi_w', [self.emb64_multi_size], self.emb_init),\r\n ('wide_indicator_w', [self.indicator_size], self.emb_init),\r\n ('wide_bias', [1], self.emb_init)\r\n ]\r\n var_map = init_var_dict(self.init_args, init_acts_wide_weight)\r\n self.wide_continue_w = var_map[\"wide_continue_w\"]\r\n self.wide_emb128_w = var_map[\"wide_emb128_w\"]\r\n self.wide_emb64_single_w = var_map[\"wide_emb64_single_w\"]\r\n self.wide_emb64_multi_w = var_map[\"wide_emb64_multi_w\"]\r\n self.wide_indicator_w = var_map[\"wide_indicator_w\"]\r\n self.wide_bias = var_map[\"wide_bias\"]\r\n\r\n self.dense_layer_1 = DenseLayer(self.all_dim_list[0],\r\n self.all_dim_list[1],\r\n self.weight_bias_init,\r\n self.deep_layer_act,\r\n drop_out=config.dropout_flag,\r\n convert_dtype=True)\r\n self.dense_layer_2 = DenseLayer(self.all_dim_list[1],\r\n self.all_dim_list[2],\r\n self.weight_bias_init,\r\n self.deep_layer_act,\r\n drop_out=config.dropout_flag,\r\n convert_dtype=True)\r\n self.dense_layer_3 = DenseLayer(self.all_dim_list[2],\r\n self.all_dim_list[3],\r\n self.weight_bias_init,\r\n self.deep_layer_act,\r\n drop_out=config.dropout_flag,\r\n convert_dtype=True)\r\n self.dense_layer_4 = DenseLayer(self.all_dim_list[3],\r\n self.all_dim_list[4],\r\n self.weight_bias_init,\r\n self.deep_layer_act,\r\n drop_out=config.dropout_flag,\r\n convert_dtype=True)\r\n self.dense_layer_5 = DenseLayer(self.all_dim_list[4],\r\n self.all_dim_list[5],\r\n self.weight_bias_init,\r\n self.deep_layer_act,\r\n drop_out=config.dropout_flag,\r\n convert_dtype=True)\r\n\r\n self.deep_predict = DenseLayer(self.all_dim_list[5],\r\n self.all_dim_list[6],\r\n self.weight_bias_init,\r\n self.deep_layer_act,\r\n drop_out=config.dropout_flag,\r\n convert_dtype=True,\r\n use_activation=False)\r\n\r\n self.gather_v2 = P.GatherV2()\r\n self.mul = P.Mul()\r\n self.reduce_sum_false = P.ReduceSum(keep_dims=False)\r\n self.reduce_sum_true = P.ReduceSum(keep_dims=True)\r\n self.reshape = P.Reshape()\r\n self.square = P.Square()\r\n self.shape = P.Shape()\r\n self.tile = P.Tile()\r\n self.concat = P.Concat(axis=1)\r\n self.cast = P.Cast()\r\n self.reduceMean_false = P.ReduceMean(keep_dims=False)\r\n self.Concat = P.Concat(axis=1)\r\n self.BiasAdd = P.BiasAdd()\r\n self.expand_dims = P.ExpandDims()\r\n self.flatten = Flatten()\r\n\r\n def 
construct(self, continue_val, indicator_id, emb_128_id,\r\n emb_64_single_id, multi_doc_ad_category_id,\r\n multi_doc_ad_category_id_mask, multi_doc_event_entity_id,\r\n multi_doc_event_entity_id_mask, multi_doc_ad_entity_id,\r\n multi_doc_ad_entity_id_mask, multi_doc_event_topic_id,\r\n multi_doc_event_topic_id_mask, multi_doc_event_category_id,\r\n multi_doc_event_category_id_mask, multi_doc_ad_topic_id,\r\n multi_doc_ad_topic_id_mask, display_id, ad_id,\r\n display_ad_and_is_leak, is_leak):\r\n \"\"\"\r\n Args:\r\n id_hldr: batch ids;\r\n wt_hldr: batch weights;\r\n \"\"\"\r\n\r\n val_hldr = continue_val\r\n ind_hldr = indicator_id\r\n emb128_id_hldr = emb_128_id\r\n emb64_single_hldr = emb_64_single_id\r\n\r\n ind_emb = self.gather_v2(self.emb64_indicator, ind_hldr, 0)\r\n ind_emb = self.flatten(ind_emb)\r\n\r\n emb128_id_emb = self.gather_v2(self.emb128_embedding, emb128_id_hldr,\r\n 0)\r\n emb128_id_emb = self.flatten(emb128_id_emb)\r\n\r\n emb64_sgl_emb = self.gather_v2(self.emb64_single, emb64_single_hldr, 0)\r\n emb64_sgl_emb = self.flatten(emb64_sgl_emb)\r\n\r\n mult_emb_1 = self.gather_v2(self.emb64_multi, multi_doc_ad_category_id,\r\n 0)\r\n mult_emb_1 = self.mul(\r\n self.cast(mult_emb_1, mstype.float32),\r\n self.cast(self.expand_dims(multi_doc_ad_category_id_mask, 2),\r\n mstype.float32))\r\n mult_emb_1 = self.reduceMean_false(mult_emb_1, 1)\r\n\r\n mult_emb_2 = self.gather_v2(self.emb64_multi,\r\n multi_doc_event_entity_id, 0)\r\n mult_emb_2 = self.mul(\r\n self.cast(mult_emb_2, mstype.float32),\r\n self.cast(self.expand_dims(multi_doc_event_entity_id_mask, 2),\r\n mstype.float32))\r\n mult_emb_2 = self.reduceMean_false(mult_emb_2, 1)\r\n\r\n mult_emb_3 = self.gather_v2(self.emb64_multi, multi_doc_ad_entity_id,\r\n 0)\r\n mult_emb_3 = self.mul(\r\n self.cast(mult_emb_3, mstype.float32),\r\n self.cast(self.expand_dims(multi_doc_ad_entity_id_mask, 2),\r\n mstype.float32))\r\n mult_emb_3 = self.reduceMean_false(mult_emb_3, 1)\r\n\r\n mult_emb_4 = self.gather_v2(self.emb64_multi, multi_doc_event_topic_id,\r\n 0)\r\n mult_emb_4 = self.mul(\r\n self.cast(mult_emb_4, mstype.float32),\r\n self.cast(self.expand_dims(multi_doc_event_topic_id_mask, 2),\r\n mstype.float32))\r\n mult_emb_4 = self.reduceMean_false(mult_emb_4, 1)\r\n\r\n mult_emb_5 = self.gather_v2(self.emb64_multi,\r\n multi_doc_event_category_id, 0)\r\n mult_emb_5 = self.mul(\r\n self.cast(mult_emb_5, mstype.float32),\r\n self.cast(self.expand_dims(multi_doc_event_category_id_mask, 2),\r\n mstype.float32))\r\n mult_emb_5 = self.reduceMean_false(mult_emb_5, 1)\r\n\r\n mult_emb_6 = self.gather_v2(self.emb64_multi, multi_doc_ad_topic_id, 0)\r\n mult_emb_6 = self.mul(\r\n self.cast(mult_emb_6, mstype.float32),\r\n self.cast(self.expand_dims(multi_doc_ad_topic_id_mask, 2),\r\n mstype.float32))\r\n mult_emb_6 = self.reduceMean_false(mult_emb_6, 1)\r\n\r\n mult_embedding = self.Concat((mult_emb_1, mult_emb_2, mult_emb_3,\r\n mult_emb_4, mult_emb_5, mult_emb_6))\r\n\r\n input_embedding = self.Concat((val_hldr * 1, ind_emb, emb128_id_emb,\r\n emb64_sgl_emb, mult_embedding))\r\n deep_out = self.dense_layer_1(input_embedding)\r\n deep_out = self.dense_layer_2(deep_out)\r\n deep_out = self.dense_layer_3(deep_out)\r\n deep_out = self.dense_layer_4(deep_out)\r\n deep_out = self.dense_layer_5(deep_out)\r\n\r\n deep_out = self.deep_predict(deep_out)\r\n\r\n val_weight = self.mul(val_hldr,\r\n self.expand_dims(self.wide_continue_w, 0))\r\n\r\n val_w_sum = self.reduce_sum_true(val_weight, 1)\r\n\r\n ind_weight = 
self.gather_v2(self.wide_indicator_w, ind_hldr, 0)\r\n ind_w_sum = self.reduce_sum_true(ind_weight, 1)\r\n\r\n emb128_id_weight = self.gather_v2(self.wide_emb128_w, emb128_id_hldr,\r\n 0)\r\n emb128_w_sum = self.reduce_sum_true(emb128_id_weight, 1)\r\n\r\n emb64_sgl_weight = self.gather_v2(self.wide_emb64_single_w,\r\n emb64_single_hldr, 0)\r\n emb64_w_sum = self.reduce_sum_true(emb64_sgl_weight, 1)\r\n\r\n mult_weight_1 = self.gather_v2(self.wide_emb64_multi_w,\r\n multi_doc_ad_category_id, 0)\r\n mult_weight_1 = self.mul(\r\n self.cast(mult_weight_1, mstype.float32),\r\n self.cast(multi_doc_ad_category_id_mask, mstype.float32))\r\n mult_weight_1 = self.reduce_sum_true(mult_weight_1, 1)\r\n\r\n mult_weight_2 = self.gather_v2(self.wide_emb64_multi_w,\r\n multi_doc_event_entity_id, 0)\r\n mult_weight_2 = self.mul(\r\n self.cast(mult_weight_2, mstype.float32),\r\n self.cast(multi_doc_event_entity_id_mask, mstype.float32))\r\n mult_weight_2 = self.reduce_sum_true(mult_weight_2, 1)\r\n\r\n mult_weight_3 = self.gather_v2(self.wide_emb64_multi_w,\r\n multi_doc_ad_entity_id, 0)\r\n mult_weight_3 = self.mul(\r\n self.cast(mult_weight_3, mstype.float32),\r\n self.cast(multi_doc_ad_entity_id_mask, mstype.float32))\r\n mult_weight_3 = self.reduce_sum_true(mult_weight_3, 1)\r\n\r\n mult_weight_4 = self.gather_v2(self.wide_emb64_multi_w,\r\n multi_doc_event_topic_id, 0)\r\n mult_weight_4 = self.mul(\r\n self.cast(mult_weight_4, mstype.float32),\r\n self.cast(multi_doc_event_topic_id_mask, mstype.float32))\r\n mult_weight_4 = self.reduce_sum_true(mult_weight_4, 1)\r\n\r\n mult_weight_5 = self.gather_v2(self.wide_emb64_multi_w,\r\n multi_doc_event_category_id, 0)\r\n mult_weight_5 = self.mul(\r\n self.cast(mult_weight_5, mstype.float32),\r\n self.cast(multi_doc_event_category_id_mask, mstype.float32))\r\n mult_weight_5 = self.reduce_sum_true(mult_weight_5, 1)\r\n\r\n mult_weight_6 = self.gather_v2(self.wide_emb64_multi_w,\r\n multi_doc_ad_topic_id, 0)\r\n\r\n mult_weight_6 = self.mul(\r\n self.cast(mult_weight_6, mstype.float32),\r\n self.cast(multi_doc_ad_topic_id_mask, mstype.float32))\r\n mult_weight_6 = self.reduce_sum_true(mult_weight_6, 1)\r\n\r\n mult_weight_sum = mult_weight_1 + mult_weight_2 + mult_weight_3 + mult_weight_4 + mult_weight_5 + mult_weight_6\r\n\r\n wide_out = self.BiasAdd(\r\n val_w_sum + ind_w_sum + emb128_w_sum + emb64_w_sum +\r\n mult_weight_sum, self.wide_bias)\r\n\r\n out = wide_out + deep_out\r\n return out, self.emb128_embedding, self.emb64_single, self.emb64_multi\r\n\r\n\r\nclass NetWithLossClass(nn.Cell):\r\n \"\"\"\"\r\n Provide WideDeep training loss through network.\r\n\r\n Args:\r\n network (Cell): The training network\r\n config (Class): WideDeep config\r\n \"\"\"\r\n def __init__(self, network, config):\r\n super(NetWithLossClass, self).__init__(auto_prefix=False)\r\n self.network = network\r\n self.l2_coef = config.l2_coef\r\n\r\n self.loss = P.SigmoidCrossEntropyWithLogits()\r\n self.square = P.Square()\r\n self.reduceMean_false = P.ReduceMean(keep_dims=False)\r\n self.reduceSum_false = P.ReduceSum(keep_dims=False)\r\n self.reshape = P.Reshape()\r\n\r\n def construct(self, label, continue_val, indicator_id, emb_128_id,\r\n emb_64_single_id, multi_doc_ad_category_id,\r\n multi_doc_ad_category_id_mask, multi_doc_event_entity_id,\r\n multi_doc_event_entity_id_mask, multi_doc_ad_entity_id,\r\n multi_doc_ad_entity_id_mask, multi_doc_event_topic_id,\r\n multi_doc_event_topic_id_mask, multi_doc_event_category_id,\r\n multi_doc_event_category_id_mask, 
multi_doc_ad_topic_id,\r\n multi_doc_ad_topic_id_mask, display_id, ad_id,\r\n display_ad_and_is_leak, is_leak):\r\n \"\"\"\r\n NetWithLossClass construct\r\n \"\"\"\r\n # emb128_embedding, emb64_single, emb64_multi\r\n predict, _, _, _ = self.network(\r\n continue_val, indicator_id, emb_128_id, emb_64_single_id,\r\n multi_doc_ad_category_id, multi_doc_ad_category_id_mask,\r\n multi_doc_event_entity_id, multi_doc_event_entity_id_mask,\r\n multi_doc_ad_entity_id, multi_doc_ad_entity_id_mask,\r\n multi_doc_event_topic_id, multi_doc_event_topic_id_mask,\r\n multi_doc_event_category_id, multi_doc_event_category_id_mask,\r\n multi_doc_ad_topic_id, multi_doc_ad_topic_id_mask, display_id,\r\n ad_id, display_ad_and_is_leak, is_leak)\r\n\r\n predict = self.reshape(predict, (-1,))\r\n basic_loss = self.loss(predict, label)\r\n wide_loss = self.reduceMean_false(basic_loss)\r\n deep_loss = self.reduceMean_false(basic_loss)\r\n return wide_loss, deep_loss\r\n\r\n\r\nclass IthOutputCell(nn.Cell):\r\n \"\"\"\r\n IthOutputCell\r\n \"\"\"\r\n def __init__(self, network, output_index):\r\n super(IthOutputCell, self).__init__()\r\n self.network = network\r\n self.output_index = output_index\r\n\r\n def construct(self, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13,\r\n x14, x15, x16, x17, x18, x19, x20, x21):\r\n \"\"\"\r\n IthOutputCell construct\r\n \"\"\"\r\n predict = self.network(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,\r\n x12, x13, x14, x15, x16, x17, x18, x19, x20,\r\n x21)[self.output_index]\r\n return predict\r\n\r\n\r\nclass TrainStepWrap(nn.Cell):\r\n \"\"\"\r\n Encapsulation class of WideDeep network training.\r\n\r\n Append Adam and FTRL optimizers to the training network after that construct\r\n function can be called to create the backward graph.\r\n\r\n Args:\r\n network (Cell): the training network. Note that loss function should have been added.\r\n sens (Number): The adjust parameter. 
Default: 1000.0\r\n \"\"\"\r\n def __init__(self, network, config, sens=1000.0):\r\n super(TrainStepWrap, self).__init__()\r\n self.network = network\r\n self.network.set_train()\r\n self.trainable_params = network.trainable_params()\r\n weights_w = []\r\n weights_d = []\r\n for params in self.trainable_params:\r\n if 'wide' in params.name:\r\n weights_w.append(params)\r\n else:\r\n weights_d.append(params)\r\n\r\n self.weights_w = ParameterTuple(weights_w)\r\n self.weights_d = ParameterTuple(weights_d)\r\n self.optimizer_w = FTRL(learning_rate=config.ftrl_lr,\r\n params=self.weights_w,\r\n l1=5e-4,\r\n l2=5e-4,\r\n initial_accum=0.1,\r\n loss_scale=sens)\r\n\r\n self.optimizer_d = Adam(self.weights_d,\r\n learning_rate=config.adam_lr,\r\n eps=1e-6,\r\n loss_scale=sens)\r\n\r\n self.hyper_map = C.HyperMap()\r\n\r\n self.grad_w = C.GradOperation(get_by_list=True,\r\n sens_param=True)\r\n self.grad_d = C.GradOperation(get_by_list=True,\r\n sens_param=True)\r\n\r\n self.sens = sens\r\n self.loss_net_w = IthOutputCell(network, output_index=0)\r\n self.loss_net_d = IthOutputCell(network, output_index=1)\r\n self.loss_net_w.set_grad()\r\n self.loss_net_w.set_grad()\r\n\r\n self.reducer_flag = False\r\n self.grad_reducer_w = None\r\n self.grad_reducer_d = None\r\n parallel_mode = context.get_auto_parallel_context(\"parallel_mode\")\r\n if parallel_mode in (ParallelMode.DATA_PARALLEL,\r\n ParallelMode.HYBRID_PARALLEL):\r\n self.reducer_flag = True\r\n if self.reducer_flag:\r\n mean = context.get_auto_parallel_context(\"gradients_mean\")\r\n degree = context.get_auto_parallel_context(\"device_num\")\r\n self.grad_reducer_w = DistributedGradReducer(\r\n self.optimizer_w.parameters, mean, degree)\r\n self.grad_reducer_d = DistributedGradReducer(\r\n self.optimizer_d.parameters, mean, degree)\r\n\r\n def construct(self, label, continue_val, indicator_id, emb_128_id,\r\n emb_64_single_id, multi_doc_ad_category_id,\r\n multi_doc_ad_category_id_mask, multi_doc_event_entity_id,\r\n multi_doc_event_entity_id_mask, multi_doc_ad_entity_id,\r\n multi_doc_ad_entity_id_mask, multi_doc_event_topic_id,\r\n multi_doc_event_topic_id_mask, multi_doc_event_category_id,\r\n multi_doc_event_category_id_mask, multi_doc_ad_topic_id,\r\n multi_doc_ad_topic_id_mask, display_id, ad_id,\r\n display_ad_and_is_leak, is_leak):\r\n \"\"\"\r\n TrainStepWrap construct\r\n \"\"\"\r\n weights_w = self.weights_w\r\n weights_d = self.weights_d\r\n loss_w, loss_d = self.network(\r\n label, continue_val, indicator_id, emb_128_id, emb_64_single_id,\r\n multi_doc_ad_category_id, multi_doc_ad_category_id_mask,\r\n multi_doc_event_entity_id, multi_doc_event_entity_id_mask,\r\n multi_doc_ad_entity_id, multi_doc_ad_entity_id_mask,\r\n multi_doc_event_topic_id, multi_doc_event_topic_id_mask,\r\n multi_doc_event_category_id, multi_doc_event_category_id_mask,\r\n multi_doc_ad_topic_id, multi_doc_ad_topic_id_mask, display_id,\r\n ad_id, display_ad_and_is_leak, is_leak)\r\n\r\n sens_w = P.Fill()(P.DType()(loss_w), P.Shape()(loss_w), self.sens) #\r\n sens_d = P.Fill()(P.DType()(loss_d), P.Shape()(loss_d), self.sens) #\r\n grads_w = self.grad_w(self.loss_net_w, weights_w)(\r\n label, continue_val, indicator_id, emb_128_id, emb_64_single_id,\r\n multi_doc_ad_category_id, multi_doc_ad_category_id_mask,\r\n multi_doc_event_entity_id, multi_doc_event_entity_id_mask,\r\n multi_doc_ad_entity_id, multi_doc_ad_entity_id_mask,\r\n multi_doc_event_topic_id, multi_doc_event_topic_id_mask,\r\n multi_doc_event_category_id, multi_doc_event_category_id_mask,\r\n 
multi_doc_ad_topic_id, multi_doc_ad_topic_id_mask, display_id,\r\n ad_id, display_ad_and_is_leak, is_leak, sens_w)\r\n grads_d = self.grad_d(self.loss_net_d, weights_d)(\r\n label, continue_val, indicator_id, emb_128_id, emb_64_single_id,\r\n multi_doc_ad_category_id, multi_doc_ad_category_id_mask,\r\n multi_doc_event_entity_id, multi_doc_event_entity_id_mask,\r\n multi_doc_ad_entity_id, multi_doc_ad_entity_id_mask,\r\n multi_doc_event_topic_id, multi_doc_event_topic_id_mask,\r\n multi_doc_event_category_id, multi_doc_event_category_id_mask,\r\n multi_doc_ad_topic_id, multi_doc_ad_topic_id_mask, display_id,\r\n ad_id, display_ad_and_is_leak, is_leak, sens_d)\r\n if self.reducer_flag:\r\n # apply grad reducer on grads\r\n grads_w = self.grad_reducer_w(grads_w)\r\n grads_d = self.grad_reducer_d(grads_d)\r\n return F.depend(loss_w, self.optimizer_w(grads_w)), F.depend(\r\n loss_d, self.optimizer_d(grads_d))\r\n\r\n\r\nclass PredictWithSigmoid(nn.Cell):\r\n \"\"\"\r\n PredictWithSigomid\r\n \"\"\"\r\n def __init__(self, network):\r\n super(PredictWithSigmoid, self).__init__()\r\n self.network = network\r\n self.sigmoid = P.Sigmoid()\r\n self.reshape = P.Reshape()\r\n\r\n def construct(self, label, continue_val, indicator_id, emb_128_id,\r\n emb_64_single_id, multi_doc_ad_category_id,\r\n multi_doc_ad_category_id_mask, multi_doc_event_entity_id,\r\n multi_doc_event_entity_id_mask, multi_doc_ad_entity_id,\r\n multi_doc_ad_entity_id_mask, multi_doc_event_topic_id,\r\n multi_doc_event_topic_id_mask, multi_doc_event_category_id,\r\n multi_doc_event_category_id_mask, multi_doc_ad_topic_id,\r\n multi_doc_ad_topic_id_mask, display_id, ad_id,\r\n display_ad_and_is_leak, is_leak):\r\n \"\"\"\r\n PredictWithSigomid construct\r\n \"\"\"\r\n logits, _, _, _ = self.network(\r\n continue_val, indicator_id, emb_128_id, emb_64_single_id,\r\n multi_doc_ad_category_id, multi_doc_ad_category_id_mask,\r\n multi_doc_event_entity_id, multi_doc_event_entity_id_mask,\r\n multi_doc_ad_entity_id, multi_doc_ad_entity_id_mask,\r\n multi_doc_event_topic_id, multi_doc_event_topic_id_mask,\r\n multi_doc_event_category_id, multi_doc_event_category_id_mask,\r\n multi_doc_ad_topic_id, multi_doc_ad_topic_id_mask, display_id,\r\n ad_id, display_ad_and_is_leak, is_leak)\r\n logits = self.reshape(logits, (-1,))\r\n pred_probs = self.sigmoid(logits)\r\n return logits, pred_probs, label, display_id\r\n" ]
[ [ "numpy.random.uniform" ], [ "numpy.char.encode", "numpy.char.decode" ], [ "numpy.random.seed", "numpy.frombuffer", "numpy.shape", "numpy.array", "numpy.zeros", "numpy.random.randint" ], [ "numpy.random.seed", "numpy.random.choice", "numpy.asarray", "numpy.reshape", "numpy.ones", "numpy.max", "numpy.random.permutation", "numpy.where", "numpy.flip", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.logical_not", "pandas.read_csv", "numpy.log2", "numpy.reshape", "numpy.arange", "numpy.cumsum", "numpy.argwhere", "numpy.concatenate", "numpy.all", "numpy.greater_equal", "numpy.iinfo", "numpy.mod", "numpy.repeat", "numpy.array", "numpy.zeros" ], [ "numpy.array", "numpy.zeros", "numpy.ones" ], [ "numpy.ones_like", "numpy.array_equal", "scipy.ndimage.filters.gaussian_filter", "numpy.array", "numpy.zeros" ], [ "numpy.array", "numpy.zeros", "numpy.mean" ], [ "numpy.random.normal", "numpy.zeros", "numpy.sqrt", "numpy.ones" ], [ "numpy.int" ], [ "numpy.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Gretacyh/images-downloader-fliter
[ "ffe070026a45c741013a575a6a985d97e28d6fd7" ]
[ "img_filter/img_advanced_filter.py" ]
[ "import os\nimport re\nimport cv2\nimport umap\nimport torch\nimport torch.nn as nn\nfrom torchvision import models\nimport torch.nn.functional as F\nimport numpy as np\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\n\n\ndef global_std_pool2d(x):\n \"\"\"2D global standard variation pooling\"\"\"\n return torch.std(x.view(x.size()[0], x.size()[1], -1, 1), dim=2, keepdim=True)\n\n\nclass ResNet50(torch.nn.Module):\n \"\"\"Modified ResNet50 for feature extraction\"\"\"\n\n def __init__(self):\n super(ResNet50, self).__init__()\n self.features = nn.Sequential(*list(models.resnet50(pretrained=True).children())[:-2])\n # 冻结模型\n for p in self.features.parameters():\n p.requires_grad = False\n # 检测是否有GPU\n if torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n self.to(self.device)\n\n def forward(self, x):\n # features@: 7->res5c\n for ii, model in enumerate(self.features):\n x = model(x)\n if ii == 7:\n features_mean = nn.functional.adaptive_avg_pool2d(x, 1)\n features_std = global_std_pool2d(x)\n return features_mean, features_std\n\n\n# 提取图像特征\ndef get_img_feature(model, img_path):\n img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR)\n img = torch.from_numpy(img)\n img = img.to(model.device).float()\n img = torch.unsqueeze(img, 0) # batch size 1\n img = img.permute(0, 3, 1, 2)\n feature = model(img)\n return feature\n\n\n# UMAP降维\ndef do_umap(features, channel=2, random_state=None):\n model = umap.UMAP(n_components=channel, random_state=random_state)\n return model.fit_transform(features), model\n\n\n# t-SNE降维\ndef do_tsne(data, random_state=0):\n tsne = TSNE(n_components=2, init='pca', random_state=random_state)\n return tsne.fit_transform(data), tsne\n\n\n# 绘制数据图像\ndef plot_embedding(data, type=None, text=None, title=\"\", colors=None):\n if type is None:\n type = np.zeros_like(data[:, 0])\n x_min, x_max = np.min(data, 0), np.max(data, 0)\n data = (data - x_min) / (x_max - x_min)\n\n fig = plt.figure()\n ax = plt.subplot(111)\n for i in range(data.shape[0]):\n if text is not None:\n plt.text(data[i, 0], data[i, 1], str(text[i]),\n color=plt.cm.Set1((text[i] + 1) / 10.) if colors is None else colors[type[i]],\n fontdict={'weight': 'bold', 'size': 8})\n else:\n plt.scatter(data[i, 0], data[i, 1], s=3,\n color=plt.cm.Set1((text[i] + 1) / 10.) if colors is None else colors[type[i]])\n plt.xticks([])\n plt.yticks([])\n plt.title(title)\n plt.show()\n return fig\n\n\nif __name__ == '__main__':\n root_dir = \"/root/yanghan/cat\"\n file_suffix = \"jpeg|jpg|png\"\n remove_dir = root_dir + \"/remove\"\n if not os.path.exists(remove_dir):\n os.makedirs(remove_dir)\n\n # 模型初始化\n model = ResNet50()\n # 提取图像特征\n feature_list = []\n name_list = []\n for img_name in os.listdir(root_dir)[:]:\n # 对处理文件的类型进行过滤\n if re.search(file_suffix, img_name) is None:\n continue\n img_path = root_dir + \"/\" + img_name\n mean, std = get_img_feature(model, img_path)\n mean = mean.to('cpu').numpy().reshape(-1)\n std = std.to('cpu').numpy().reshape(-1)\n feature = np.concatenate((mean, std), 0)\n print(feature.shape)\n feature_list.append(feature)\n name_list.append(img_name[7:10])\n\n # 特征绘图\n feature_list = np.array(feature_list)\n name_list = np.array(name_list)\n feature_list_tsne, _ = do_tsne(feature_list)\n plot_embedding(feature_list_tsne, title=\"tsne\", text=name_list)\n feature_list_umap, _ = do_umap(feature_list)\n plot_embedding(feature_list_umap, title=\"umap\", text=name_list)\n cv2.waitKey()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.yticks", "matplotlib.pyplot.title", "numpy.min", "torch.from_numpy", "torch.unsqueeze", "torch.nn.functional.adaptive_avg_pool2d", "numpy.concatenate", "sklearn.manifold.TSNE", "numpy.max", "matplotlib.pyplot.subplot", "numpy.zeros_like", "matplotlib.pyplot.cm.Set1", "torch.cuda.is_available", "torch.device", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZZR0/ISSTA21-JIT-DP
[ "c2916f7c3b1d235ff2858220886d6a7da068bf8a", "c2916f7c3b1d235ff2858220886d6a7da068bf8a", "c2916f7c3b1d235ff2858220886d6a7da068bf8a" ]
[ "DeepJIT/train.py", "JIT_Baseline/baseline.py", "JIT_Baseline/LR.py" ]
[ "from model import DeepJIT\nimport torch \nfrom tqdm import tqdm\nfrom utils import mini_batches_train, save\nimport torch.nn as nn\nimport os, datetime\n\ndef train_model(data, params):\n data_pad_msg, data_pad_code, data_labels, dict_msg, dict_code = data\n \n # set up parameters\n params.cuda = (not params.no_cuda) and torch.cuda.is_available()\n del params.no_cuda\n params.filter_sizes = [int(k) for k in params.filter_sizes.split(',')]\n\n # params.save_dir = os.path.join(params.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n\n params.vocab_msg, params.vocab_code = len(dict_msg), len(dict_code) \n\n if len(data_labels.shape) == 1:\n params.class_num = 1\n else:\n params.class_num = data_labels.shape[1]\n params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # create and train the defect model\n model = DeepJIT(args=params)\n if torch.cuda.is_available():\n model = model.cuda()\n optimizer = torch.optim.Adam(model.parameters(), lr=params.l2_reg_lambda)\n\n criterion = nn.BCELoss()\n for epoch in range(1, params.num_epochs + 1):\n total_loss = 0\n # building batches for training model\n batches = mini_batches_train(X_msg=data_pad_msg, X_code=data_pad_code, Y=data_labels, mini_batch_size=params.batch_size)\n for i, (batch) in enumerate(tqdm(batches)):\n pad_msg, pad_code, labels = batch\n if torch.cuda.is_available(): \n pad_msg, pad_code, labels = torch.tensor(pad_msg).cuda(), torch.tensor(\n pad_code).cuda(), torch.cuda.FloatTensor(labels)\n else: \n pad_msg, pad_code, labels = torch.tensor(pad_msg).long(), torch.tensor(pad_code).long(), torch.tensor(\n labels).float()\n\n optimizer.zero_grad()\n predict = model.forward(pad_msg, pad_code)\n loss = criterion(predict, labels)\n total_loss += loss\n loss.backward()\n optimizer.step()\n\n print('Epoch %i / %i -- Total loss: %f' % (epoch, params.num_epochs, total_loss)) \n save(model, params.save_dir, 'epoch', epoch)\n", "import math\nimport random\nimport time\nimport argparse\n\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc\nimport pandas as pd\nimport numpy as np\nimport torch.nn as nn\nimport torch\n\nfrom LR import LR\nfrom DBN import DBN\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-project', type=str,\n default='qt')\nparser.add_argument('-data', type=str,\n default='k')\nparser.add_argument('-algorithm', type=str,\n default='lr')\nparser.add_argument('-drop', type=str,\n default='')\nparser.add_argument('-only', nargs='+',\n default=[])\n\n\ndef evaluation_metrics(y_true, y_pred):\n fpr, tpr, thresholds = roc_curve(y_true=y_true, y_score=y_pred, pos_label=1)\n auc_ = auc(fpr, tpr)\n\n y_pred = [1 if p >= 0.5 else 0 for p in y_pred]\n acc = accuracy_score(y_true=y_true, y_pred=y_pred)\n prc = precision_score(y_true=y_true, y_pred=y_pred)\n rc = recall_score(y_true=y_true, y_pred=y_pred)\n # f1 = 2 * prc * rc / (prc + rc)\n f1 = 0\n return acc, prc, rc, f1, auc_\n\n\ndef replace_value_dataframe(df):\n df = df.replace({True: 1, False: 0})\n df = df.fillna(df.mean())\n if args.drop:\n df = df.drop(columns=[args.drop])\n elif args.only:\n df = df[['Unnamed: 0','_id','date','bug','__'] + args.only]\n return df.values\n\n\ndef get_features(data):\n # return the features of yasu data\n return data[:, 5:]\n\n\ndef get_ids(data):\n # return the labels of yasu data\n return data[:, 1:2].flatten().tolist()\n\n\ndef get_label(data):\n data = data[:, 
3:4].flatten().tolist()\n data = [1 if int(d) > 0 else 0 for d in data]\n return data\n\n\ndef load_df_yasu_data(path_data):\n data = pd.read_csv(path_data)\n data = replace_value_dataframe(df=data)\n ids, labels, features = get_ids(data=data), get_label(data=data), get_features(data=data)\n indexes = list()\n cnt_noexits = 0\n for i in range(0, len(ids)):\n try:\n indexes.append(i)\n except FileNotFoundError:\n print('File commit id no exits', ids[i], cnt_noexits)\n cnt_noexits += 1\n ids = [ids[i] for i in indexes]\n labels = [labels[i] for i in indexes]\n features = features[indexes]\n return (ids, np.array(labels), features)\n\n\ndef load_yasu_data(args):\n train_path_data = 'data/{}/{}_train.csv'.format(args.project, args.data)\n test_path_data = 'data/{}/{}_test.csv'.format(args.project, args.data)\n train, test = load_df_yasu_data(train_path_data), load_df_yasu_data(test_path_data)\n return train, test\n\n\ndef train_and_evl(data, label, args):\n size = int(label.shape[0]*0.2)\n auc_ = []\n\n for i in range(5):\n idx = size * i\n X_e = data[idx:idx+size]\n y_e = label[idx:idx+size]\n\n X_t = np.vstack((data[:idx], data[idx+size:]))\n y_t = np.hstack((label[:idx], label[idx+size:]))\n\n\n model = LogisticRegression(max_iter=7000).fit(X_t, y_t)\n y_pred = model.predict_proba(X_e)[:, 1]\n fpr, tpr, thresholds = roc_curve(y_true=y_e, y_score=y_pred, pos_label=1)\n auc_.append(auc(fpr, tpr))\n\n return np.mean(auc_)\n\n\ndef mini_batches_update(X, Y, mini_batch_size=64, seed=0):\n m = X.shape[0] # number of training examples\n mini_batches = list()\n np.random.seed(seed)\n\n # Step 1: No shuffle (X, Y)\n shuffled_X, shuffled_Y = X, Y\n Y = Y.tolist()\n Y_pos = [i for i in range(len(Y)) if Y[i] == 1]\n Y_neg = [i for i in range(len(Y)) if Y[i] == 0]\n\n # Step 2: Randomly pick mini_batch_size / 2 from each of positive and negative labels\n num_complete_minibatches = int(math.floor(m / float(mini_batch_size))) + 1\n for k in range(0, num_complete_minibatches):\n indexes = sorted(\n random.sample(Y_pos, int(mini_batch_size / 2)) + random.sample(Y_neg, int(mini_batch_size / 2)))\n mini_batch_X, mini_batch_Y = shuffled_X[indexes], shuffled_Y[indexes]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n return mini_batches\n\n\ndef mini_batches(X, Y, mini_batch_size=64, seed=0):\n m = X.shape[0] # number of training examples\n mini_batches = list()\n np.random.seed(seed)\n\n # Step 1: No shuffle (X, Y)\n shuffled_X, shuffled_Y = X, Y\n\n # Step 2: Partition (X, Y). 
Minus the end case.\n # number of mini batches of size mini_batch_size in your partitioning\n num_complete_minibatches = int(math.floor(m / float(mini_batch_size)))\n\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]\n if len(Y.shape) == 1:\n mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size]\n else:\n mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size: m, :]\n if len(Y.shape) == 1:\n mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m]\n else:\n mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m, :]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n return mini_batches\n\n\ndef DBN_JIT(train_features, train_labels, test_features, test_labels, hidden_units=[20, 12, 12], num_epochs_LR=200):\n # training DBN model\n #################################################################################################\n starttime = time.time()\n dbn_model = DBN(visible_units=train_features.shape[1],\n hidden_units=hidden_units,\n use_gpu=False)\n dbn_model.train_static(train_features, train_labels, num_epochs=10)\n # Finishing the training DBN model\n # print('---------------------Finishing the training DBN model---------------------')\n # using DBN model to construct features\n DBN_train_features, _ = dbn_model.forward(train_features)\n DBN_test_features, _ = dbn_model.forward(test_features)\n DBN_train_features = DBN_train_features.numpy()\n DBN_test_features = DBN_test_features.numpy()\n\n train_features = np.hstack((train_features, DBN_train_features))\n test_features = np.hstack((test_features, DBN_test_features))\n\n\n if len(train_labels.shape) == 1:\n num_classes = 1\n else:\n num_classes = train_labels.shape[1]\n # lr_model = LR(input_size=hidden_units, num_classes=num_classes)\n lr_model = LR(input_size=train_features.shape[1], num_classes=num_classes)\n optimizer = torch.optim.Adam(lr_model.parameters(), lr=0.00001)\n steps = 0\n batches_test = mini_batches(X=test_features, Y=test_labels)\n for epoch in range(1, num_epochs_LR + 1):\n # building batches for training model\n batches_train = mini_batches_update(X=train_features, Y=train_labels)\n for batch in batches_train:\n x_batch, y_batch = batch\n x_batch, y_batch = torch.tensor(x_batch).float(), torch.tensor(y_batch).float()\n\n optimizer.zero_grad()\n predict = lr_model.forward(x_batch)\n loss = nn.BCELoss()\n loss = loss(predict, y_batch)\n loss.backward()\n optimizer.step()\n\n # steps += 1\n # if steps % 100 == 0:\n # print('\\rEpoch: {} step: {} - loss: {:.6f}'.format(epoch, steps, loss.item()))\n\n endtime = time.time()\n dtime = endtime - starttime\n print(\"Train Time: %.8s s\" % dtime) #显示到微秒 \n\n starttime = time.time()\n y_pred, lables = lr_model.predict(data=batches_test)\n endtime = time.time()\n dtime = endtime - starttime\n print(\"Eval Time: %.8s s\" % dtime) #显示到微秒 \n return y_pred\n\n\ndef baseline_algorithm(train, test, algorithm, only=False):\n _, y_train, X_train = train\n _, y_test, X_test = test\n X_train, X_test = preprocessing.scale(X_train), preprocessing.scale(X_test)\n\n acc, prc, rc, f1, auc_ = 0, 0, 0, 0, 0\n if algorithm == 'lr':\n 
starttime = time.time()\n model = LogisticRegression(max_iter=7000).fit(X_train, y_train)\n endtime = time.time()\n dtime = endtime - starttime\n print(\"Train Time: %.8s s\" % dtime) #显示到微秒 \n\n starttime = time.time()\n y_pred = model.predict_proba(X_test)[:, 1]\n endtime = time.time()\n dtime = endtime - starttime\n print(\"Eval Time: %.8s s\" % dtime) #显示到微秒 \n acc, prc, rc, f1, auc_ = evaluation_metrics(y_true=y_test, y_pred=y_pred)\n if only and not \"cross\" in args.data:\n auc_ = train_and_evl(X_train, y_train, args)\n print('Accuracy: %f -- Precision: %f -- Recall: %f -- F1: %f -- AUC: %f' % (acc, prc, rc, f1, auc_))\n elif algorithm =='dbn':\n y_pred = DBN_JIT(X_train, y_train, X_test, y_test)\n acc, prc, rc, f1, auc_ = evaluation_metrics(y_true=y_test, y_pred=y_pred)\n acc, prc, rc, f1 = 0, 0, 0, 0\n print('Accuracy: %f -- Precision: %f -- Recall: %f -- F1: %f -- AUC: %f' % (acc, prc, rc, f1, auc_))\n else:\n print('You need to give the correct algorithm name')\n return\n\n return y_test, y_pred \n\n\ndef save_result(labels, predicts, path):\n results = []\n for lable, predict in zip(labels, predicts):\n results.append('{}\\t{}\\n'.format(lable, predict))\n \n with open(path, 'w', encoding='utf-8') as f:\n f.writelines(results)\n\n\nif __name__ == '__main__':\n\n args = parser.parse_args()\n\n save_path = 'result/{}/{}_{}_{}.result'.format(args.project, args.project, args.algorithm, args.data.replace(\"/\",\"_\"))\n only = True if args.only else False\n if args.algorithm == 'la':\n args.algorithm = 'lr'\n args.only = ['la']\n if \"all\" in args.only:\n args.only.remove(\"all\")\n\n train, test = load_yasu_data(args)\n \n labels, predicts = baseline_algorithm(train=train, test=test, algorithm=args.algorithm, only=only)\n\n if not only:\n save_result(labels, predicts, save_path)", "import torch\nimport torch.nn as nn\n\nclass LR(nn.Module):\n def __init__(self, input_size, num_classes):\n super(LR, self).__init__()\n # self.fc = nn.Linear(input_size, 128)\n # self.fc1 = nn.Linear(128, 256)\n # self.fc2 = nn.Linear(256, 64)\n # self.fc3 = nn.Linear(64, num_classes)\n\n self.fc = nn.Linear(input_size, num_classes)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, input_size):\n # out = self.fc(input_size)\n # out = self.fc1(out)\n # out = self.fc2(out)\n # out = self.fc3(out)\n\n out = self.fc(input_size)\n out = self.sigmoid(out).squeeze(1)\n return out\n\n def predict(self, data):\n with torch.no_grad():\n self.eval() # since we use drop out\n all_predict, all_label = list(), list()\n for batch in data:\n x, y = batch\n x = torch.tensor(x).float()\n\n predict = self.forward(x).detach().numpy().tolist()\n all_predict += predict\n all_label += y.tolist()\n # acc, prc, rc, f1, auc_ = evaluation_metrics(y_pred=all_predict, y_true=all_label)\n # print('Accuracy: %f -- Precision: %f -- Recall: %f -- F1: %f -- AUC: %f' % (acc, prc, rc, f1, auc_))\n return all_predict, all_label\n" ]
[ [ "torch.cuda.FloatTensor", "torch.tensor", "torch.cuda.is_available", "torch.nn.BCELoss" ], [ "numpy.hstack", "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "numpy.random.seed", "sklearn.metrics.precision_score", "sklearn.metrics.roc_curve", "torch.nn.BCELoss", "torch.tensor", "numpy.mean", "sklearn.metrics.auc", "sklearn.preprocessing.scale", "numpy.array", "sklearn.metrics.recall_score", "numpy.vstack", "sklearn.metrics.accuracy_score" ], [ "torch.tensor", "torch.nn.Linear", "torch.no_grad", "torch.nn.Sigmoid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
uve/tensorflow
[ "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080" ]
[ "tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py", "tensorflow/python/kernel_tests/signal/fft_ops_test.py", "tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py", "tensorflow/python/ops/distributions/distributions.py", "tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py", "tensorflow/python/keras/initializers_test.py", "tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py", "tensorflow/python/keras/engine/training_v2_utils.py", "tensorflow/python/training/tensorboard_logging.py", "tensorflow/python/keras/utils/data_utils.py", "tensorflow/python/ops/ragged/ragged_squeeze_op_test.py", "tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_test.py", "tensorflow/python/data/experimental/kernel_tests/serialization/unique_dataset_serialization_test.py", "tensorflow/contrib/metrics/python/ops/metric_ops_large_test.py", "tensorflow/python/data/ops/readers.py", "tensorflow/python/training/server_lib_same_variables_no_clear_test.py", "tensorflow/contrib/distributions/python/kernel_tests/bijectors/permute_test.py", "tensorflow/contrib/factorization/python/ops/wals.py", "tensorflow/python/ops/random_grad.py", "tensorflow/contrib/session_bundle/exporter.py", "tensorflow/python/training/warm_starting_util.py", "tensorflow/python/ops/tensor_array_ops.py", "tensorflow/contrib/bayesflow/python/ops/monte_carlo.py", "tensorflow/contrib/distributions/python/ops/bijectors/__init__.py", "tensorflow/python/data/kernel_tests/shard_test.py", "tensorflow/python/keras/utils/multi_gpu_utils.py", "tensorflow/python/kernel_tests/linalg/linear_operator_householder_test.py", "tensorflow/python/data/experimental/kernel_tests/serialization/ignore_errors_serialization_test.py", "tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py", "tensorflow/python/distribute/cluster_resolver/gce_cluster_resolver.py", "tensorflow/python/distribute/model_collection/simple_models.py", "tensorflow/python/ops/batch_norm_benchmark.py", "tensorflow/contrib/opt/python/training/adamax.py", "tensorflow/contrib/boosted_trees/python/kernel_tests/stats_accumulator_ops_test.py", "tensorflow/python/kernel_tests/distributions/normal_test.py", "tensorflow/python/tools/strip_unused.py", "tensorflow/python/tpu/profiler/capture_tpu_profile.py", "tensorflow/contrib/boosted_trees/python/ops/quantile_ops.py", "tensorflow/python/keras/layers/gru_v2_test.py", "tensorflow/contrib/framework/python/ops/checkpoint_ops.py", "tensorflow/python/ops/check_ops.py", "tensorflow/python/framework/composite_tensor_utils_test.py", "tensorflow/contrib/learn/python/learn/estimators/estimator_test_utils.py", "tensorflow/python/data/experimental/ops/threading_options.py", "tensorflow/contrib/hadoop/python/ops/hadoop_dataset_ops.py", "tensorflow/python/ops/batch_ops.py", "tensorflow/compiler/tests/pooling_ops_3d_test.py", "tensorflow/python/data/kernel_tests/filter_with_legacy_function_test.py", "tensorflow/lite/testing/model_coverage/model_coverage_lib.py", "tensorflow/python/training/experimental/loss_scale_test.py", "tensorflow/contrib/resampler/__init__.py", "tensorflow/python/distribute/central_storage_strategy.py", "tensorflow/compiler/tests/fake_quant_ops_test.py", "tensorflow/python/tpu/topology.py", "tensorflow/python/training/basic_session_run_hooks.py", "tensorflow/contrib/gan/python/estimator/python/tpu_gan_estimator.py", "tensorflow/contrib/eager/python/metrics.py", "tensorflow/python/distribute/cross_device_utils_test.py", 
"tensorflow/contrib/timeseries/examples/lstm.py", "tensorflow/python/distribute/keras_metrics_test.py", "tensorflow/contrib/eager/python/examples/revnet/revnet_test.py", "tensorflow/examples/get_started/regression/imports85.py", "tensorflow/python/data/kernel_tests/filter_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nr\"\"\"Multivariate autoregressive model (vector autoregression).\r\n\r\nImplements the following model (num_blocks = max(ar_order, ma_order + 1)):\r\n\r\n y(t, 1) = \\sum_{i=1}^{ar_order} ar_coefs[i] * y(t - 1, i)\r\n y(t, i) = y(t - 1, i - 1) + ma_coefs[i - 1] * e(t) for 1 < i < num_blocks\r\n y(t, num_blocks) = y(t - 1, num_blocks - 1) + e(t)\r\n\r\nWhere e(t) are Gaussian with zero mean and learned covariance.\r\n\r\nEach element of ar_coefs and ma_coefs is a [num_features x num_features]\r\nmatrix. Each y(t, i) is a vector of length num_features. Indices in the above\r\nequations are one-based. Initial conditions y(0, i) come from prior state (which\r\nmay either be learned or left as a constant with high prior covariance).\r\n\r\nIf ar_order > ma_order, the observation model is:\r\n y(t, 1) + observation_noise(t)\r\n\r\nIf ma_order >= ar_order, it is (to observe the moving average component):\r\n y(t, 1) + y(t, num_blocks) + observation_noise(t)\r\n\r\nWhere observation_noise(t) are Gaussian with zero mean and learned covariance.\r\n\r\nThis implementation uses a formulation which puts all of the autoregressive\r\ncoefficients in the transition equation for the observed component, which\r\nenables learning using truncated backpropagation. 
Noise is not applied directly\r\nto the observed component (with the exception of standard observation noise),\r\nwhich further aids learning of the autoregressive coefficients when VARMA is in\r\nan ensemble with other models (in which case having an observation noise term is\r\nusually unavoidable).\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.timeseries.python.timeseries import math_utils\r\nfrom tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.ops import linalg_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import variable_scope\r\n\r\n\r\nclass VARMA(state_space_model.StateSpaceModel):\r\n \"\"\"A VARMA model implementation as a special case of the state space model.\"\"\"\r\n\r\n def __init__(self,\r\n autoregressive_order,\r\n moving_average_order,\r\n configuration=state_space_model.StateSpaceModelConfiguration()):\r\n \"\"\"Construct a VARMA model.\r\n\r\n The size of the latent state for this model is:\r\n num_features * max(autoregressive_order, moving_average_order + 1)\r\n Square matrices of this size are constructed and multiplied.\r\n\r\n Args:\r\n autoregressive_order: The maximum autoregressive lag.\r\n moving_average_order: The maximum moving average lag, after which\r\n transient deviations are expected to return to their long-term mean.\r\n configuration: A StateSpaceModelConfiguration object.\r\n \"\"\"\r\n self.ar_order = autoregressive_order\r\n self.ma_order = moving_average_order\r\n self.state_num_blocks = max(autoregressive_order, moving_average_order + 1)\r\n super(VARMA, self).__init__(configuration=configuration)\r\n self.state_dimension = self.state_num_blocks * self.num_features\r\n\r\n def _define_parameters(self, observation_transition_tradeoff_log=None):\r\n with variable_scope.variable_scope(self._variable_scope):\r\n # TODO(allenl): Evaluate parameter transformations for AR/MA coefficients\r\n # which improve interpretability/stability.\r\n self.ar_coefs = variable_scope.get_variable(\r\n name=\"ar_coefs\",\r\n shape=[self.num_features, self.num_features, self.ar_order],\r\n dtype=self.dtype,\r\n initializer=init_ops.zeros_initializer())\r\n self.ma_coefs = variable_scope.get_variable(\r\n name=\"ma_coefs\",\r\n initializer=array_ops.tile(\r\n linalg_ops.eye(self.num_features, dtype=self.dtype)[None, :, :],\r\n [self.ma_order, 1, 1]),\r\n dtype=self.dtype)\r\n super(VARMA, self)._define_parameters(\r\n observation_transition_tradeoff_log=observation_transition_tradeoff_log)\r\n\r\n def get_state_transition(self):\r\n \"\"\"Construct state transition matrix from VARMA parameters.\r\n\r\n Returns:\r\n the state transition matrix. It has shape\r\n [self.state_dimension, self.state_dimension].\r\n \"\"\"\r\n # Pad any unused AR blocks with zeros. 
The extra state is necessary if\r\n # ma_order >= ar_order.\r\n ar_coefs_padded = array_ops.reshape(\r\n array_ops.pad(self.ar_coefs,\r\n [[0, 0], [0, 0],\r\n [0, self.state_num_blocks - self.ar_order]]),\r\n [self.num_features, self.state_dimension])\r\n shift_matrix = array_ops.pad(\r\n linalg_ops.eye(\r\n (self.state_num_blocks - 1) * self.num_features, dtype=self.dtype),\r\n [[0, 0], [0, self.num_features]])\r\n return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0)\r\n\r\n def get_noise_transform(self):\r\n \"\"\"Construct state noise transform matrix from VARMA parameters.\r\n\r\n Returns:\r\n the state noise transform matrix. It has shape\r\n [self.state_dimension, self.num_features].\r\n \"\"\"\r\n # Noise is broadcast, through the moving average coefficients, to\r\n # un-observed parts of the latent state.\r\n ma_coefs_padded = array_ops.reshape(\r\n array_ops.pad(self.ma_coefs,\r\n [[self.state_num_blocks - 1 - self.ma_order, 0], [0, 0],\r\n [0, 0]]),\r\n [(self.state_num_blocks - 1) * self.num_features, self.num_features],\r\n name=\"noise_transform\")\r\n # Deterministically apply noise to the oldest component.\r\n return array_ops.concat(\r\n [ma_coefs_padded,\r\n linalg_ops.eye(self.num_features, dtype=self.dtype)],\r\n axis=0)\r\n\r\n def get_observation_model(self, times):\r\n \"\"\"Construct observation model matrix from VARMA parameters.\r\n\r\n Args:\r\n times: A [batch size] vector indicating the times observation models are\r\n requested for. Unused.\r\n Returns:\r\n the observation model matrix. It has shape\r\n [self.num_features, self.state_dimension].\r\n \"\"\"\r\n del times # StateSpaceModel will broadcast along the batch dimension\r\n if self.ar_order > self.ma_order or self.state_num_blocks < 2:\r\n return array_ops.pad(\r\n linalg_ops.eye(self.num_features, dtype=self.dtype),\r\n [[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],\r\n name=\"observation_model\")\r\n else:\r\n # Add a second observed component which \"catches\" the accumulated moving\r\n # average errors as they reach the end of the state. If ar_order >\r\n # ma_order, this is unnecessary, since accumulated errors cycle naturally.\r\n return array_ops.concat(\r\n [\r\n array_ops.pad(\r\n linalg_ops.eye(self.num_features, dtype=self.dtype),\r\n [[0, 0], [0,\r\n self.num_features * (self.state_num_blocks - 2)]]),\r\n linalg_ops.eye(self.num_features, dtype=self.dtype)\r\n ],\r\n axis=1,\r\n name=\"observation_model\")\r\n\r\n def get_state_transition_noise_covariance(\r\n self, minimum_initial_variance=1e-5):\r\n # Most state space models use only an explicit observation noise term to\r\n # model deviations from expectations, and so a low initial transition noise\r\n # parameter is helpful there. 
Since deviations from expectations are also\r\n # modeled as transition noise in VARMA, we set its initial value based on a\r\n # slight over-estimate empirical observation noise.\r\n if self._input_statistics is not None:\r\n feature_variance = self._scale_variance(\r\n self._input_statistics.series_start_moments.variance)\r\n initial_transition_noise_scale = math_ops.log(\r\n math_ops.maximum(\r\n math_ops.reduce_mean(feature_variance), minimum_initial_variance))\r\n else:\r\n initial_transition_noise_scale = 0.\r\n state_noise_transform = ops.convert_to_tensor(\r\n self.get_noise_transform(), dtype=self.dtype)\r\n state_noise_dimension = tensor_shape.dimension_value(\r\n state_noise_transform.shape[1])\r\n return math_utils.variable_covariance_matrix(\r\n state_noise_dimension, \"state_transition_noise\",\r\n dtype=self.dtype,\r\n initial_overall_scale_log=initial_transition_noise_scale)\r\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for fft operations.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom six.moves import xrange # pylint: disable=redefined-builtin\r\n\r\nfrom tensorflow.core.protobuf import config_pb2\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_spectral_ops\r\nfrom tensorflow.python.ops import gradient_checker\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import spectral_ops_test_util\r\nfrom tensorflow.python.ops.signal import fft_ops\r\nfrom tensorflow.python.platform import test\r\n\r\nVALID_FFT_RANKS = (1, 2, 3)\r\n\r\n\r\nclass BaseFFTOpsTest(test.TestCase):\r\n\r\n def _compare(self, x, rank, fft_length=None, use_placeholder=False,\r\n rtol=1e-4, atol=1e-4):\r\n self._compareForward(x, rank, fft_length, use_placeholder, rtol, atol)\r\n self._compareBackward(x, rank, fft_length, use_placeholder, rtol, atol)\r\n\r\n def _compareForward(self, x, rank, fft_length=None, use_placeholder=False,\r\n rtol=1e-4, atol=1e-4):\r\n x_np = self._npFFT(x, rank, fft_length)\r\n if use_placeholder:\r\n x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))\r\n x_tf = self._tfFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})\r\n else:\r\n x_tf = self._tfFFT(x, rank, fft_length)\r\n\r\n self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)\r\n\r\n def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False,\r\n rtol=1e-4, atol=1e-4):\r\n x_np = self._npIFFT(x, rank, fft_length)\r\n if use_placeholder:\r\n x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))\r\n x_tf = self._tfIFFT(x_ph, 
rank, fft_length, feed_dict={x_ph: x})\r\n else:\r\n x_tf = self._tfIFFT(x, rank, fft_length)\r\n\r\n self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)\r\n\r\n def _checkMemoryFail(self, x, rank):\r\n config = config_pb2.ConfigProto()\r\n config.gpu_options.per_process_gpu_memory_fraction = 1e-2\r\n with self.cached_session(config=config, force_gpu=True):\r\n self._tfFFT(x, rank, fft_length=None)\r\n\r\n def _checkGradComplex(self, func, x, y, result_is_complex=True,\r\n rtol=1e-2, atol=1e-2):\r\n with self.cached_session(use_gpu=True):\r\n inx = ops.convert_to_tensor(x)\r\n iny = ops.convert_to_tensor(y)\r\n # func is a forward or inverse, real or complex, batched or unbatched FFT\r\n # function with a complex input.\r\n z = func(math_ops.complex(inx, iny))\r\n # loss = sum(|z|^2)\r\n loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))\r\n\r\n ((x_jacob_t, x_jacob_n),\r\n (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(\r\n [inx, iny], [list(x.shape), list(y.shape)],\r\n loss, [1],\r\n x_init_value=[x, y],\r\n delta=1e-2)\r\n\r\n self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)\r\n self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)\r\n\r\n def _checkGradReal(self, func, x, rtol=1e-2, atol=1e-2):\r\n with self.cached_session(use_gpu=True):\r\n inx = ops.convert_to_tensor(x)\r\n # func is a forward RFFT function (batched or unbatched).\r\n z = func(inx)\r\n # loss = sum(|z|^2)\r\n loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))\r\n x_jacob_t, x_jacob_n = test.compute_gradient(\r\n inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)\r\n\r\n self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)\r\n\r\n\r\nclass FFTOpsTest(BaseFFTOpsTest):\r\n\r\n def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):\r\n # fft_length unused for complex FFTs.\r\n with self.cached_session(use_gpu=True) as sess:\r\n return sess.run(self._tfFFTForRank(rank)(x), feed_dict=feed_dict)\r\n\r\n def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):\r\n # fft_length unused for complex FFTs.\r\n with self.cached_session(use_gpu=True) as sess:\r\n return sess.run(self._tfIFFTForRank(rank)(x), feed_dict=feed_dict)\r\n\r\n def _npFFT(self, x, rank, fft_length=None):\r\n if rank == 1:\r\n return np.fft.fft2(x, s=fft_length, axes=(-1,))\r\n elif rank == 2:\r\n return np.fft.fft2(x, s=fft_length, axes=(-2, -1))\r\n elif rank == 3:\r\n return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1))\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _npIFFT(self, x, rank, fft_length=None):\r\n if rank == 1:\r\n return np.fft.ifft2(x, s=fft_length, axes=(-1,))\r\n elif rank == 2:\r\n return np.fft.ifft2(x, s=fft_length, axes=(-2, -1))\r\n elif rank == 3:\r\n return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1))\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _tfFFTForRank(self, rank):\r\n if rank == 1:\r\n return fft_ops.fft\r\n elif rank == 2:\r\n return fft_ops.fft2d\r\n elif rank == 3:\r\n return fft_ops.fft3d\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _tfIFFTForRank(self, rank):\r\n if rank == 1:\r\n return fft_ops.ifft\r\n elif rank == 2:\r\n return fft_ops.ifft2d\r\n elif rank == 3:\r\n return fft_ops.ifft3d\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n @test_util.run_deprecated_v1\r\n def testEmpty(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type in (np.complex64, np.complex128):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 
3):\r\n x = np.zeros((0,) * dims).astype(np_type)\r\n self.assertEqual(x.shape, self._tfFFT(x, rank).shape)\r\n self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)\r\n\r\n @test_util.run_deprecated_v1\r\n def testBasic(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n self._compare(\r\n np.mod(np.arange(np.power(4, dims)), 10).reshape(\r\n (4,) * dims).astype(np_type), rank, rtol=tol, atol=tol)\r\n\r\n def testLargeBatch(self):\r\n if test.is_gpu_available(cuda_only=True):\r\n rank = 1\r\n for dims in xrange(rank, rank + 3):\r\n for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-5)):\r\n self._compare(\r\n np.mod(np.arange(np.power(128, dims)), 10).reshape(\r\n (128,) * dims).astype(np_type), rank, rtol=tol, atol=tol)\r\n\r\n # TODO(yangzihao): Disable before we can figure out a way to\r\n # properly test memory fail for large batch fft.\r\n # def testLargeBatchMemoryFail(self):\r\n # if test.is_gpu_available(cuda_only=True):\r\n # rank = 1\r\n # for dims in xrange(rank, rank + 3):\r\n # self._checkMemoryFail(\r\n # np.mod(np.arange(np.power(128, dims)), 64).reshape(\r\n # (128,) * dims).astype(np.complex64), rank)\r\n\r\n @test_util.run_deprecated_v1\r\n def testBasicPlaceholder(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n self._compare(\r\n np.mod(np.arange(np.power(4, dims)), 10).reshape(\r\n (4,) * dims).astype(np_type),\r\n rank, use_placeholder=True, rtol=tol, atol=tol)\r\n\r\n @test_util.run_deprecated_v1\r\n def testRandom(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 5e-6)):\r\n def gen(shape):\r\n n = np.prod(shape)\r\n re = np.random.uniform(size=n)\r\n im = np.random.uniform(size=n)\r\n return (re + im * 1j).reshape(shape)\r\n\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n self._compare(gen((4,) * dims).astype(np_type), rank,\r\n rtol=tol, atol=tol)\r\n\r\n @test_util.run_deprecated_v1\r\n def testRandom1D(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type in (np.complex64, np.complex128):\r\n has_gpu = test.is_gpu_available(cuda_only=True)\r\n tol = {(np.complex64, True): 1e-4,\r\n (np.complex64, False): 1e-2,\r\n (np.complex128, True): 1e-4,\r\n (np.complex128, False): 1e-2}[(np_type, has_gpu)]\r\n def gen(shape):\r\n n = np.prod(shape)\r\n re = np.random.uniform(size=n)\r\n im = np.random.uniform(size=n)\r\n return (re + im * 1j).reshape(shape)\r\n\r\n # Check a variety of power-of-2 FFT sizes.\r\n for dim in (128, 256, 512, 1024):\r\n self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)\r\n\r\n # Check a variety of non-power-of-2 FFT sizes.\r\n for dim in (127, 255, 511, 1023):\r\n self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)\r\n\r\n @test_util.run_deprecated_v1\r\n def testError(self):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(0, rank):\r\n x = np.zeros((1,) * dims).astype(np.complex64)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Shape must be .*rank {}.*\".format(rank)):\r\n self._tfFFT(x, rank)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Shape must be .*rank {}.*\".format(rank)):\r\n self._tfIFFT(x, rank)\r\n\r\n 
@test_util.run_deprecated_v1\r\n def testGrad_Simple(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.float32, 1e-4), (np.float64, 1e-10)):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 2):\r\n re = np.ones(shape=(4,) * dims, dtype=np_type) / 10.0\r\n im = np.zeros(shape=(4,) * dims, dtype=np_type)\r\n self._checkGradComplex(self._tfFFTForRank(rank), re, im,\r\n rtol=tol, atol=tol)\r\n self._checkGradComplex(self._tfIFFTForRank(rank), re, im,\r\n rtol=tol, atol=tol)\r\n\r\n @test_util.run_deprecated_v1\r\n def testGrad_Random(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.float32, 1e-2), (np.float64, 1e-10)):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 2):\r\n re = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1\r\n im = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1\r\n self._checkGradComplex(self._tfFFTForRank(rank), re, im,\r\n rtol=tol, atol=tol)\r\n self._checkGradComplex(self._tfIFFTForRank(rank), re, im,\r\n rtol=tol, atol=tol)\r\n\r\n\r\nclass RFFTOpsTest(BaseFFTOpsTest):\r\n\r\n def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False):\r\n super(RFFTOpsTest, self)._compareBackward(x, rank, fft_length,\r\n use_placeholder)\r\n\r\n def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):\r\n with self.cached_session(use_gpu=True) as sess:\r\n return sess.run(\r\n self._tfFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)\r\n\r\n def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):\r\n with self.cached_session(use_gpu=True) as sess:\r\n return sess.run(\r\n self._tfIFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)\r\n\r\n def _npFFT(self, x, rank, fft_length=None):\r\n if rank == 1:\r\n return np.fft.rfft2(x, s=fft_length, axes=(-1,))\r\n elif rank == 2:\r\n return np.fft.rfft2(x, s=fft_length, axes=(-2, -1))\r\n elif rank == 3:\r\n return np.fft.rfft2(x, s=fft_length, axes=(-3, -2, -1))\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _npIFFT(self, x, rank, fft_length=None):\r\n if rank == 1:\r\n return np.fft.irfft2(x, s=fft_length, axes=(-1,))\r\n elif rank == 2:\r\n return np.fft.irfft2(x, s=fft_length, axes=(-2, -1))\r\n elif rank == 3:\r\n return np.fft.irfft2(x, s=fft_length, axes=(-3, -2, -1))\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _tfFFTForRank(self, rank):\r\n if rank == 1:\r\n return fft_ops.rfft\r\n elif rank == 2:\r\n return fft_ops.rfft2d\r\n elif rank == 3:\r\n return fft_ops.rfft3d\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _tfIFFTForRank(self, rank):\r\n if rank == 1:\r\n return fft_ops.irfft\r\n elif rank == 2:\r\n return fft_ops.irfft2d\r\n elif rank == 3:\r\n return fft_ops.irfft3d\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n @test_util.run_deprecated_v1\r\n def testEmpty(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n x = np.zeros((0,) * dims).astype(np.float32)\r\n self.assertEqual(x.shape, self._tfFFT(x, rank).shape)\r\n x = np.zeros((0,) * dims).astype(np.complex64)\r\n self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)\r\n\r\n @test_util.run_deprecated_v1\r\n def testBasic(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n for size in (5, 6):\r\n inner_dim = size // 2 + 1\r\n r2c = np.mod(np.arange(np.power(size, dims)), 
10).reshape(\r\n (size,) * dims)\r\n self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)\r\n c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),\r\n 10).reshape((size,) * (dims - 1) + (inner_dim,))\r\n self._compareBackward(\r\n c2r.astype(np.complex64), rank, (size,) * rank)\r\n\r\n def testLargeBatch(self):\r\n if test.is_gpu_available(cuda_only=True):\r\n rank = 1\r\n for dims in xrange(rank, rank + 3):\r\n for size in (64, 128):\r\n inner_dim = size // 2 + 1\r\n r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(\r\n (size,) * dims)\r\n self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)\r\n c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),\r\n 10).reshape((size,) * (dims - 1) + (inner_dim,))\r\n self._compareBackward(c2r.astype(np.complex64), rank, (size,) * rank)\r\n\r\n @test_util.run_deprecated_v1\r\n def testBasicPlaceholder(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n for size in (5, 6):\r\n inner_dim = size // 2 + 1\r\n r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(\r\n (size,) * dims)\r\n self._compareForward(\r\n r2c.astype(np.float32),\r\n rank, (size,) * rank,\r\n use_placeholder=True)\r\n c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),\r\n 10).reshape((size,) * (dims - 1) + (inner_dim,))\r\n self._compareBackward(\r\n c2r.astype(np.complex64),\r\n rank, (size,) * rank,\r\n use_placeholder=True)\r\n\r\n @test_util.run_deprecated_v1\r\n def testFftLength(self):\r\n if test.is_gpu_available(cuda_only=True):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n for size in (5, 6):\r\n inner_dim = size // 2 + 1\r\n r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(\r\n (size,) * dims)\r\n c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),\r\n 10).reshape((size,) * (dims - 1) + (inner_dim,))\r\n # Test truncation (FFT size < dimensions).\r\n fft_length = (size - 2,) * rank\r\n self._compareForward(r2c.astype(np.float32), rank, fft_length)\r\n self._compareBackward(c2r.astype(np.complex64), rank, fft_length)\r\n # Confirm it works with unknown shapes as well.\r\n self._compareForward(\r\n r2c.astype(np.float32),\r\n rank,\r\n fft_length,\r\n use_placeholder=True)\r\n self._compareBackward(\r\n c2r.astype(np.complex64),\r\n rank,\r\n fft_length,\r\n use_placeholder=True)\r\n # Test padding (FFT size > dimensions).\r\n fft_length = (size + 2,) * rank\r\n self._compareForward(r2c.astype(np.float32), rank, fft_length)\r\n self._compareBackward(c2r.astype(np.complex64), rank, fft_length)\r\n # Confirm it works with unknown shapes as well.\r\n self._compareForward(\r\n r2c.astype(np.float32),\r\n rank,\r\n fft_length,\r\n use_placeholder=True)\r\n self._compareBackward(\r\n c2r.astype(np.complex64),\r\n rank,\r\n fft_length,\r\n use_placeholder=True)\r\n\r\n @test_util.run_deprecated_v1\r\n def testRandom(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n def gen_real(shape):\r\n n = np.prod(shape)\r\n re = np.random.uniform(size=n)\r\n ret = re.reshape(shape)\r\n return ret\r\n\r\n def gen_complex(shape):\r\n n = np.prod(shape)\r\n re = np.random.uniform(size=n)\r\n im = np.random.uniform(size=n)\r\n ret = (re + im * 1j).reshape(shape)\r\n return ret\r\n\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n for size in (5, 6):\r\n inner_dim = size // 2 + 1\r\n 
self._compareForward(gen_real((size,) * dims), rank, (size,) * rank)\r\n complex_dims = (size,) * (dims - 1) + (inner_dim,)\r\n self._compareBackward(\r\n gen_complex(complex_dims), rank, (size,) * rank)\r\n\r\n @test_util.run_deprecated_v1\r\n def testError(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(0, rank):\r\n x = np.zeros((1,) * dims).astype(np.complex64)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Shape .* must have rank at least {}\".format(rank)):\r\n self._tfFFT(x, rank)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Shape .* must have rank at least {}\".format(rank)):\r\n self._tfIFFT(x, rank)\r\n for dims in xrange(rank, rank + 2):\r\n x = np.zeros((1,) * rank)\r\n\r\n # Test non-rank-1 fft_length produces an error.\r\n fft_length = np.zeros((1, 1)).astype(np.int32)\r\n with self.assertRaisesWithPredicateMatch(ValueError,\r\n \"Shape .* must have rank 1\"):\r\n self._tfFFT(x, rank, fft_length)\r\n with self.assertRaisesWithPredicateMatch(ValueError,\r\n \"Shape .* must have rank 1\"):\r\n self._tfIFFT(x, rank, fft_length)\r\n\r\n # Test wrong fft_length length.\r\n fft_length = np.zeros((rank + 1,)).astype(np.int32)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Dimension must be .*but is {}.*\".format(rank + 1)):\r\n self._tfFFT(x, rank, fft_length)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Dimension must be .*but is {}.*\".format(rank + 1)):\r\n self._tfIFFT(x, rank, fft_length)\r\n\r\n # Test that calling the kernel directly without padding to fft_length\r\n # produces an error.\r\n rffts_for_rank = {\r\n 1: [gen_spectral_ops.rfft, gen_spectral_ops.irfft],\r\n 2: [gen_spectral_ops.rfft2d, gen_spectral_ops.irfft2d],\r\n 3: [gen_spectral_ops.rfft3d, gen_spectral_ops.irfft3d]\r\n }\r\n rfft_fn, irfft_fn = rffts_for_rank[rank]\r\n with self.assertRaisesWithPredicateMatch(\r\n errors.InvalidArgumentError,\r\n \"Input dimension .* must have length of at least 6 but got: 5\"):\r\n x = np.zeros((5,) * rank).astype(np.float32)\r\n fft_length = [6] * rank\r\n with self.cached_session():\r\n self.evaluate(rfft_fn(x, fft_length))\r\n\r\n with self.assertRaisesWithPredicateMatch(\r\n errors.InvalidArgumentError,\r\n \"Input dimension .* must have length of at least .* but got: 3\"):\r\n x = np.zeros((3,) * rank).astype(np.complex64)\r\n fft_length = [6] * rank\r\n with self.cached_session():\r\n self.evaluate(irfft_fn(x, fft_length))\r\n\r\n @test_util.run_deprecated_v1\r\n def testGrad_Simple(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n # rfft3d/irfft3d do not have gradients yet.\r\n if rank == 3:\r\n continue\r\n for dims in xrange(rank, rank + 2):\r\n for size in (5, 6):\r\n re = np.ones(shape=(size,) * dims, dtype=np.float32)\r\n im = -np.ones(shape=(size,) * dims, dtype=np.float32)\r\n self._checkGradReal(self._tfFFTForRank(rank), re)\r\n self._checkGradComplex(\r\n self._tfIFFTForRank(rank), re, im, result_is_complex=False)\r\n\r\n @test_util.run_deprecated_v1\r\n def testGrad_Random(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n # rfft3d/irfft3d do not have gradients yet.\r\n if rank == 3:\r\n continue\r\n for dims in xrange(rank, rank + 2):\r\n for size in (5, 6):\r\n re = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1\r\n im = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1\r\n 
self._checkGradReal(self._tfFFTForRank(rank), re)\r\n self._checkGradComplex(\r\n self._tfIFFTForRank(rank), re, im, result_is_complex=False)\r\n\r\n\r\nclass FFTShiftTest(test.TestCase):\r\n\r\n @test_util.run_deprecated_v1\r\n def testDefinition(self):\r\n with self.session():\r\n x = [0, 1, 2, 3, 4, -4, -3, -2, -1]\r\n y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]\r\n self.assertAllEqual(fft_ops.fftshift(x).eval(), y)\r\n self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)\r\n x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]\r\n y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]\r\n self.assertAllEqual(fft_ops.fftshift(x).eval(), y)\r\n self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)\r\n\r\n @test_util.run_deprecated_v1\r\n def testAxesKeyword(self):\r\n with self.session():\r\n freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]\r\n shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]\r\n self.assertAllEqual(fft_ops.fftshift(freqs, axes=(0, 1)).eval(), shifted)\r\n self.assertAllEqual(\r\n fft_ops.fftshift(freqs, axes=0).eval(),\r\n fft_ops.fftshift(freqs, axes=(0,)).eval())\r\n self.assertAllEqual(fft_ops.ifftshift(shifted, axes=(0, 1)).eval(), freqs)\r\n self.assertAllEqual(\r\n fft_ops.ifftshift(shifted, axes=0).eval(),\r\n fft_ops.ifftshift(shifted, axes=(0,)).eval())\r\n self.assertAllEqual(fft_ops.fftshift(freqs).eval(), shifted)\r\n self.assertAllEqual(fft_ops.ifftshift(shifted).eval(), freqs)\r\n\r\n @test_util.run_deprecated_v1\r\n def testNumpyCompatibility(self):\r\n with self.session():\r\n x = [0, 1, 2, 3, 4, -4, -3, -2, -1]\r\n y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]\r\n self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))\r\n self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))\r\n x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]\r\n y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]\r\n self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))\r\n self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))\r\n freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]\r\n shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]\r\n self.assertAllEqual(\r\n fft_ops.fftshift(freqs, axes=(0, 1)).eval(),\r\n np.fft.fftshift(freqs, axes=(0, 1)))\r\n self.assertAllEqual(\r\n fft_ops.ifftshift(shifted, axes=(0, 1)).eval(),\r\n np.fft.ifftshift(shifted, axes=(0, 1)))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for the `LatencyAllEdges` optimization.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base\r\nfrom tensorflow.python.data.experimental.ops import optimization\r\nfrom tensorflow.python.data.experimental.ops import stats_aggregator\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\nclass LatencyAllEdgesTest(stats_dataset_test_base.StatsDatasetTestBase):\r\n\r\n def testLatencyStatsOptimization(self):\r\n aggregator = stats_aggregator.StatsAggregator()\r\n dataset = dataset_ops.Dataset.from_tensors(1).apply(\r\n optimization.assert_next(\r\n [\"LatencyStats\", \"Map\", \"LatencyStats\", \"Prefetch\",\r\n \"LatencyStats\"])).map(lambda x: x * x).prefetch(1)\r\n options = dataset_ops.Options()\r\n options.experimental_optimization.apply_default_optimizations = False\r\n options.experimental_stats.latency_all_edges = True\r\n options.experimental_stats.aggregator = aggregator\r\n dataset = dataset.with_options(options)\r\n self.assertDatasetProduces(\r\n dataset,\r\n expected_output=[1],\r\n requires_initialization=True,\r\n num_test_iterations=1)\r\n handle = self.getHandle(aggregator)\r\n self.assertStatisticsHasCount(\r\n handle, self.regexForNodeName(\"record_latency::TensorDataset\"), 1)\r\n self.assertStatisticsHasCount(\r\n handle, self.regexForNodeName(\"record_latency::MapDataset\"), 1)\r\n self.assertStatisticsHasCount(\r\n handle, self.regexForNodeName(\"record_latency::PrefetchDataset\"), 1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Core module for TensorFlow distribution objects and helpers.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.util import deprecation\r\n\r\n\r\n# pylint: disable=wildcard-import,unused-import,g-import-not-at-top\r\nwith deprecation.silence():\r\n from tensorflow.python.ops.distributions.bernoulli import Bernoulli\r\n from tensorflow.python.ops.distributions.beta import Beta\r\n from tensorflow.python.ops.distributions.categorical import Categorical\r\n from tensorflow.python.ops.distributions.dirichlet import Dirichlet\r\n from tensorflow.python.ops.distributions.dirichlet_multinomial import DirichletMultinomial\r\n from tensorflow.python.ops.distributions.distribution import *\r\n from tensorflow.python.ops.distributions.exponential import Exponential\r\n from tensorflow.python.ops.distributions.gamma import Gamma\r\n from tensorflow.python.ops.distributions.kullback_leibler import *\r\n from tensorflow.python.ops.distributions.laplace import Laplace\r\n from tensorflow.python.ops.distributions.multinomial import Multinomial\r\n from tensorflow.python.ops.distributions.normal import Normal\r\n from tensorflow.python.ops.distributions.student_t import StudentT\r\n from tensorflow.python.ops.distributions.uniform import Uniform\r\n# pylint: enable=wildcard-import,unused-import\r\ndel deprecation\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for MultivariateNormalFullCovariance.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom scipy import stats\r\nfrom tensorflow.contrib import distributions\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn_ops\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\nds = distributions\r\nrng = np.random.RandomState(42)\r\n\r\n\r\nclass MultivariateNormalFullCovarianceTest(test.TestCase):\r\n\r\n def _random_pd_matrix(self, *shape):\r\n mat = rng.rand(*shape)\r\n chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)\r\n chol = array_ops.matrix_band_part(chol, -1, 0)\r\n return math_ops.matmul(chol, chol, adjoint_b=True).eval()\r\n\r\n def testRaisesIfInitializedWithNonSymmetricMatrix(self):\r\n with self.cached_session():\r\n mu = [1., 2.]\r\n sigma = [[1., 0.], [1., 1.]] # Nonsingular, but not symmetric\r\n mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)\r\n with self.assertRaisesOpError(\"not symmetric\"):\r\n mvn.covariance().eval()\r\n\r\n def testNamePropertyIsSetByInitArg(self):\r\n with self.cached_session():\r\n mu = [1., 2.]\r\n sigma = [[1., 0.], [0., 1.]]\r\n mvn = ds.MultivariateNormalFullCovariance(mu, sigma, name=\"Billy\")\r\n self.assertEqual(mvn.name, \"Billy/\")\r\n\r\n def testDoesNotRaiseIfInitializedWithSymmetricMatrix(self):\r\n with self.cached_session():\r\n mu = rng.rand(10)\r\n sigma = self._random_pd_matrix(10, 10)\r\n mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)\r\n # Should not raise\r\n mvn.covariance().eval()\r\n\r\n def testLogPDFScalarBatch(self):\r\n with self.cached_session():\r\n mu = rng.rand(2)\r\n sigma = self._random_pd_matrix(2, 2)\r\n mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)\r\n x = rng.rand(2)\r\n\r\n log_pdf = mvn.log_prob(x)\r\n pdf = mvn.prob(x)\r\n\r\n scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)\r\n\r\n expected_log_pdf = scipy_mvn.logpdf(x)\r\n expected_pdf = scipy_mvn.pdf(x)\r\n self.assertEqual((), log_pdf.get_shape())\r\n self.assertEqual((), pdf.get_shape())\r\n self.assertAllClose(expected_log_pdf, log_pdf.eval())\r\n self.assertAllClose(expected_pdf, pdf.eval())\r\n\r\n def testLogPDFScalarBatchCovarianceNotProvided(self):\r\n with self.cached_session():\r\n mu = rng.rand(2)\r\n mvn = ds.MultivariateNormalFullCovariance(\r\n mu, covariance_matrix=None, validate_args=True)\r\n x = rng.rand(2)\r\n\r\n log_pdf = mvn.log_prob(x)\r\n pdf = mvn.prob(x)\r\n\r\n # Initialize a scipy_mvn with the default covariance.\r\n scipy_mvn = stats.multivariate_normal(mean=mu, cov=np.eye(2))\r\n\r\n expected_log_pdf = scipy_mvn.logpdf(x)\r\n expected_pdf = 
scipy_mvn.pdf(x)\r\n self.assertEqual((), log_pdf.get_shape())\r\n self.assertEqual((), pdf.get_shape())\r\n self.assertAllClose(expected_log_pdf, log_pdf.eval())\r\n self.assertAllClose(expected_pdf, pdf.eval())\r\n\r\n def testShapes(self):\r\n with self.cached_session():\r\n mu = rng.rand(3, 5, 2)\r\n covariance = self._random_pd_matrix(3, 5, 2, 2)\r\n\r\n mvn = ds.MultivariateNormalFullCovariance(\r\n mu, covariance, validate_args=True)\r\n\r\n # Shapes known at graph construction time.\r\n self.assertEqual((2,), tuple(mvn.event_shape.as_list()))\r\n self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))\r\n\r\n # Shapes known at runtime.\r\n self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))\r\n self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))\r\n\r\n def _random_mu_and_sigma(self, batch_shape, event_shape):\r\n # This ensures sigma is positive def.\r\n mat_shape = batch_shape + event_shape + event_shape\r\n mat = rng.randn(*mat_shape)\r\n perm = np.arange(mat.ndim)\r\n perm[-2:] = [perm[-1], perm[-2]]\r\n sigma = np.matmul(mat, np.transpose(mat, perm))\r\n\r\n mu_shape = batch_shape + event_shape\r\n mu = rng.randn(*mu_shape)\r\n\r\n return mu, sigma\r\n\r\n def testKLBatch(self):\r\n batch_shape = [2]\r\n event_shape = [3]\r\n with self.cached_session():\r\n mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)\r\n mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)\r\n mvn_a = ds.MultivariateNormalFullCovariance(\r\n loc=mu_a,\r\n covariance_matrix=sigma_a,\r\n validate_args=True)\r\n mvn_b = ds.MultivariateNormalFullCovariance(\r\n loc=mu_b,\r\n covariance_matrix=sigma_b,\r\n validate_args=True)\r\n\r\n kl = ds.kl_divergence(mvn_a, mvn_b)\r\n self.assertEqual(batch_shape, kl.get_shape())\r\n\r\n kl_v = kl.eval()\r\n expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],\r\n mu_b[0, :], sigma_b[0, :])\r\n expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],\r\n mu_b[1, :], sigma_b[1, :])\r\n self.assertAllClose(expected_kl_0, kl_v[0])\r\n self.assertAllClose(expected_kl_1, kl_v[1])\r\n\r\n def testKLBatchBroadcast(self):\r\n batch_shape = [2]\r\n event_shape = [3]\r\n with self.cached_session():\r\n mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)\r\n # No batch shape.\r\n mu_b, sigma_b = self._random_mu_and_sigma([], event_shape)\r\n mvn_a = ds.MultivariateNormalFullCovariance(\r\n loc=mu_a,\r\n covariance_matrix=sigma_a,\r\n validate_args=True)\r\n mvn_b = ds.MultivariateNormalFullCovariance(\r\n loc=mu_b,\r\n covariance_matrix=sigma_b,\r\n validate_args=True)\r\n\r\n kl = ds.kl_divergence(mvn_a, mvn_b)\r\n self.assertEqual(batch_shape, kl.get_shape())\r\n\r\n kl_v = kl.eval()\r\n expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],\r\n mu_b, sigma_b)\r\n expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],\r\n mu_b, sigma_b)\r\n self.assertAllClose(expected_kl_0, kl_v[0])\r\n self.assertAllClose(expected_kl_1, kl_v[1])\r\n\r\n\r\ndef _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):\r\n \"\"\"Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b).\"\"\"\r\n # Check using numpy operations\r\n # This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.\r\n # So it is important to also check that KL(mvn, mvn) = 0.\r\n sigma_b_inv = np.linalg.inv(sigma_b)\r\n\r\n t = np.trace(sigma_b_inv.dot(sigma_a))\r\n q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)\r\n k = mu_a.shape[0]\r\n l = np.log(np.linalg.det(sigma_b) / 
np.linalg.det(sigma_a))\r\n\r\n return 0.5 * (t + q - k + l)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for Keras initializers.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.python import keras\r\nfrom tensorflow.python import tf2\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\n@test_util.run_all_in_graph_and_eager_modes\r\nclass KerasInitializersTest(test.TestCase):\r\n\r\n def _runner(self, init, shape, target_mean=None, target_std=None,\r\n target_max=None, target_min=None):\r\n variable = keras.backend.variable(init(shape))\r\n output = keras.backend.get_value(variable)\r\n # Test serialization (assumes deterministic behavior).\r\n config = init.get_config()\r\n reconstructed_init = init.__class__.from_config(config)\r\n variable = keras.backend.variable(reconstructed_init(shape))\r\n output_2 = keras.backend.get_value(variable)\r\n self.assertAllClose(output, output_2, atol=1e-4)\r\n\r\n def test_uniform(self):\r\n tensor_shape = (9, 6, 7)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.RandomUniformV2(minval=-1, maxval=1, seed=124),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_max=1,\r\n target_min=-1)\r\n\r\n def test_normal(self):\r\n tensor_shape = (8, 12, 99)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.RandomNormalV2(mean=0, stddev=1, seed=153),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=1)\r\n\r\n def test_truncated_normal(self):\r\n tensor_shape = (12, 99, 7)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.TruncatedNormalV2(mean=0, stddev=1, seed=126),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_max=2,\r\n target_min=-2)\r\n\r\n def test_constant(self):\r\n tensor_shape = (5, 6, 4)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.ConstantV2(2.),\r\n tensor_shape,\r\n target_mean=2,\r\n target_max=2,\r\n target_min=2)\r\n\r\n def test_lecun_uniform(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, _ = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(1. / fan_in)\r\n self._runner(\r\n keras.initializers.lecun_uniformV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_glorot_uniform(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, fan_out = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(2. 
/ (fan_in + fan_out))\r\n self._runner(\r\n keras.initializers.GlorotUniformV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_he_uniform(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, _ = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(2. / fan_in)\r\n self._runner(\r\n keras.initializers.he_uniformV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_lecun_normal(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, _ = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(1. / fan_in)\r\n self._runner(\r\n keras.initializers.lecun_normalV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_glorot_normal(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, fan_out = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(2. / (fan_in + fan_out))\r\n self._runner(\r\n keras.initializers.GlorotNormalV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_he_normal(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, _ = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(2. / fan_in)\r\n self._runner(\r\n keras.initializers.he_normalV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_orthogonal(self):\r\n tensor_shape = (20, 20)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.OrthogonalV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.)\r\n\r\n def test_identity(self):\r\n with self.cached_session():\r\n tensor_shape = (3, 4, 5)\r\n with self.assertRaises(ValueError):\r\n self._runner(\r\n keras.initializers.IdentityV2(),\r\n tensor_shape,\r\n target_mean=1. / tensor_shape[0],\r\n target_max=1.)\r\n\r\n tensor_shape = (3, 3)\r\n self._runner(\r\n keras.initializers.IdentityV2(),\r\n tensor_shape,\r\n target_mean=1. 
/ tensor_shape[0],\r\n target_max=1.)\r\n\r\n def test_zero(self):\r\n tensor_shape = (4, 5)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.ZerosV2(),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_max=0.)\r\n\r\n def test_one(self):\r\n tensor_shape = (4, 5)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.OnesV2(),\r\n tensor_shape,\r\n target_mean=1.,\r\n target_max=1.)\r\n\r\n def test_default_random_uniform(self):\r\n ru = keras.initializers.get('uniform')\r\n self.assertEqual(ru.minval, -0.05)\r\n self.assertEqual(ru.maxval, 0.05)\r\n\r\n def test_default_random_normal(self):\r\n rn = keras.initializers.get('normal')\r\n self.assertEqual(rn.mean, 0.0)\r\n self.assertEqual(rn.stddev, 0.05)\r\n\r\n def test_default_truncated_normal(self):\r\n tn = keras.initializers.get('truncated_normal')\r\n self.assertEqual(tn.mean, 0.0)\r\n self.assertEqual(tn.stddev, 0.05)\r\n\r\n def test_initializer_v2_get(self):\r\n tf2_force_enabled = tf2._force_enable # pylint: disable=protected-access\r\n try:\r\n tf2.enable()\r\n rn = keras.initializers.get('random_normal')\r\n self.assertIn('init_ops_v2', rn.__class__.__module__)\r\n finally:\r\n tf2._force_enable = tf2_force_enabled # pylint: disable=protected-access\r\n\r\n def test_custom_initializer_saving(self):\r\n\r\n def my_initializer(shape, dtype=None):\r\n return array_ops.ones(shape, dtype=dtype)\r\n\r\n inputs = keras.Input((10,))\r\n outputs = keras.layers.Dense(1, kernel_initializer=my_initializer)(inputs)\r\n model = keras.Model(inputs, outputs)\r\n model2 = model.from_config(\r\n model.get_config(), custom_objects={'my_initializer': my_initializer})\r\n self.assertEqual(model2.layers[1].kernel_initializer, my_initializer)\r\n\r\n @test_util.run_v2_only\r\n def test_load_external_variance_scaling_v2(self):\r\n external_serialized_json = {\r\n 'class_name': 'VarianceScaling',\r\n 'config': {\r\n 'distribution': 'normal',\r\n 'mode': 'fan_avg',\r\n 'scale': 1.0,\r\n 'seed': None\r\n }\r\n }\r\n initializer = keras.initializers.deserialize(external_serialized_json)\r\n self.assertEqual(initializer.distribution, 'truncated_normal')\r\n\r\n\r\nif __name__ == '__main__':\r\n test.main()\r\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"LSTM Block Cell ops.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom absl.testing import parameterized\r\nimport numpy as np\r\n\r\nfrom tensorflow.contrib.rnn.python.kernel_tests import benchmarking\r\nfrom tensorflow.contrib.rnn.python.ops import lstm_ops\r\nfrom tensorflow.python.client import session\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_array_ops\r\nfrom tensorflow.python.ops import gen_bitwise_ops\r\nfrom tensorflow.python.ops import gradients_impl\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.ops import rnn\r\nfrom tensorflow.python.ops import rnn_cell\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.ops import variables\r\nfrom tensorflow.python.platform import test\r\n\r\nblock_lstm = lstm_ops._block_lstm # pylint: disable=protected-access\r\n\r\n\r\nclass _MaskedRandomUniformInitializer(init_ops.RandomUniform):\r\n \"\"\"Initializer for uniform dist tensors with trailing bits zeroed-out.\r\n\r\n Allow returning tensors with last few mantissa bits set to 0. This potentially\r\n helps avoid getting into precision issues when testing low precision (float16)\r\n computation.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n minval=0,\r\n maxval=None,\r\n seed=None,\r\n dtype=dtypes.float16,\r\n num_valid_mantissa_bits=4):\r\n \"\"\"Constructor.\r\n\r\n Args:\r\n minval: A python scalar or a scalar tensor. Lower bound of the range of\r\n random values to generate.\r\n maxval: A python scalar or a scalar tensor. Upper bound of the range of\r\n random values to generate. Defaults to 1 for float types.\r\n seed: A Python integer. Used to create random seeds. See\r\n `tf.compat.v1.set_random_seed` for behavior.\r\n dtype: The data type. 
Only supports tf.float16 for now.\r\n num_valid_mantissa_bits: number of non-zero mantissa bits, default to 4.\r\n\r\n Raises:\r\n ValueError: An error if `dtype` is not tf.float16.\r\n \"\"\"\r\n if dtype not in (dtypes.float16,):\r\n raise ValueError(\"dtype: %s not supported\" % dtype.name)\r\n\r\n super(_MaskedRandomUniformInitializer, self).__init__(\r\n minval=minval, maxval=maxval, seed=seed, dtype=dtype)\r\n self._num_mantissa_bits = 10\r\n self._num_valid_mantissa_bits = num_valid_mantissa_bits\r\n\r\n def __call__(self, shape, dtype=dtypes.float16, partition_info=None):\r\n if dtype and dtype != dtypes.float16:\r\n raise ValueError(\"dtype: %s not supported\" % dtype.name)\r\n res = super(_MaskedRandomUniformInitializer, self).__call__(\r\n shape, dtype, partition_info)\r\n # get uint16 view of the underlying buffer.\r\n res = gen_array_ops.bitcast(res, dtypes.uint16)\r\n\r\n # mask the last `shift` mantissa bits.\r\n shift = self._num_mantissa_bits - self._num_valid_mantissa_bits\r\n mask = (0xffff >> shift) << shift\r\n res = gen_bitwise_ops.bitwise_and(res, mask)\r\n\r\n # restore float16 view.\r\n return gen_array_ops.bitcast(res, dtype)\r\n\r\n\r\ndef _get_initializer(init_bound, dtype, seed):\r\n if dtype == dtypes.float16:\r\n return _MaskedRandomUniformInitializer(\r\n -init_bound, init_bound, dtype=dtype, seed=seed)\r\n else:\r\n return init_ops.random_uniform_initializer(\r\n -init_bound, init_bound, dtype=dtype, seed=seed)\r\n\r\n\r\ndef blocks_match(sess, use_peephole, dtype=dtypes.float32, cell_clip=None):\r\n batch_size = 2\r\n input_size = 3\r\n cell_size = 4\r\n sequence_length = 4\r\n\r\n inputs = []\r\n for _ in range(sequence_length):\r\n inp = ops.convert_to_tensor(\r\n np.random.randn(batch_size, input_size), dtype=dtype)\r\n inputs.append(inp)\r\n stacked_inputs = array_ops.stack(inputs)\r\n\r\n init_bound = 1e-1 if dtype == dtypes.float16 else 1e-2\r\n initializer = _get_initializer(init_bound, dtype=dtype, seed=19890212)\r\n\r\n with variable_scope.variable_scope(\"test\", initializer=initializer):\r\n # magic naming so that the cells pick up these variables and reuse them\r\n if use_peephole:\r\n wci = variable_scope.get_variable(\r\n \"rnn/lstm_cell/w_i_diag\", shape=[cell_size], dtype=dtype)\r\n wcf = variable_scope.get_variable(\r\n \"rnn/lstm_cell/w_f_diag\", shape=[cell_size], dtype=dtype)\r\n wco = variable_scope.get_variable(\r\n \"rnn/lstm_cell/w_o_diag\", shape=[cell_size], dtype=dtype)\r\n\r\n w = variable_scope.get_variable(\r\n \"rnn/lstm_cell/kernel\",\r\n shape=[input_size + cell_size, cell_size * 4],\r\n dtype=dtype)\r\n b = variable_scope.get_variable(\r\n \"rnn/lstm_cell/bias\",\r\n shape=[cell_size * 4],\r\n dtype=dtype,\r\n initializer=init_ops.zeros_initializer())\r\n\r\n basic_cell = rnn_cell.LSTMCell(\r\n cell_size,\r\n use_peepholes=use_peephole,\r\n cell_clip=cell_clip,\r\n dtype=dtype,\r\n state_is_tuple=True,\r\n reuse=True)\r\n basic_outputs_op, basic_state_op = rnn.static_rnn(\r\n basic_cell, inputs, dtype=dtype)\r\n\r\n if use_peephole:\r\n _, _, _, _, _, _, block_outputs_op = block_lstm(\r\n ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),\r\n inputs,\r\n w,\r\n b,\r\n wci=wci,\r\n wcf=wcf,\r\n wco=wco,\r\n cell_clip=cell_clip,\r\n use_peephole=True)\r\n else:\r\n _, _, _, _, _, _, block_outputs_op = block_lstm(\r\n ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),\r\n inputs,\r\n w,\r\n b,\r\n cell_clip=cell_clip)\r\n\r\n fused_cell = lstm_ops.LSTMBlockFusedCell(\r\n cell_size,\r\n 
cell_clip=cell_clip,\r\n use_peephole=use_peephole,\r\n reuse=True,\r\n name=\"rnn/lstm_cell\")\r\n fused_outputs_op, fused_state_op = fused_cell(stacked_inputs, dtype=dtype)\r\n\r\n sess.run([variables.global_variables_initializer()])\r\n basic_outputs, basic_state = sess.run([basic_outputs_op, basic_state_op[0]])\r\n basic_grads = sess.run(gradients_impl.gradients(basic_outputs_op, inputs))\r\n xs = [w, b]\r\n if use_peephole:\r\n xs += [wci, wcf, wco]\r\n basic_wgrads = sess.run(gradients_impl.gradients(basic_outputs_op, xs))\r\n\r\n block_outputs = sess.run(block_outputs_op)\r\n block_grads = sess.run(gradients_impl.gradients(block_outputs_op, inputs))\r\n block_wgrads = sess.run(gradients_impl.gradients(block_outputs_op, xs))\r\n\r\n xs = [w, b]\r\n if use_peephole:\r\n xs += [wci, wcf, wco]\r\n fused_outputs, fused_state = sess.run([fused_outputs_op, fused_state_op[0]])\r\n fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))\r\n fused_wgrads = sess.run(gradients_impl.gradients(fused_outputs_op, xs))\r\n\r\n return (basic_state, fused_state, basic_outputs, block_outputs,\r\n fused_outputs, basic_grads, block_grads, fused_grads, basic_wgrads,\r\n block_wgrads, fused_wgrads)\r\n\r\n\r\nclass LSTMBlockCellTest(test.TestCase, parameterized.TestCase):\r\n\r\n TEST_CASES = ({\r\n \"testcase_name\": \"Fp32\",\r\n \"dtype\": dtypes.float32,\r\n \"rtol\": 1e-6,\r\n \"atol\": 1e-6\r\n }, {\r\n \"testcase_name\": \"Fp16\",\r\n \"dtype\": dtypes.float16,\r\n \"rtol\": 8e-3,\r\n \"atol\": 8e-4\r\n })\r\n\r\n def testNoneDimsWithDynamicRNN(self):\r\n with self.session(use_gpu=True, graph=ops.Graph()) as sess:\r\n batch_size = 4\r\n num_steps = 5\r\n input_dim = 6\r\n cell_size = 7\r\n\r\n cell = lstm_ops.LSTMBlockCell(cell_size)\r\n x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_dim))\r\n\r\n output, _ = rnn.dynamic_rnn(\r\n cell, x, time_major=True, dtype=dtypes.float32)\r\n sess.run(variables.global_variables_initializer())\r\n feed = {}\r\n feed[x] = np.random.randn(num_steps, batch_size, input_dim)\r\n sess.run(output, feed)\r\n\r\n def testLSTMBlockCell(self):\r\n with self.session(use_gpu=True, graph=ops.Graph()) as sess:\r\n with variable_scope.variable_scope(\r\n \"root\", initializer=init_ops.constant_initializer(0.5)):\r\n x = array_ops.zeros([1, 2])\r\n m0 = array_ops.zeros([1, 2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [lstm_ops.LSTMBlockCell(2)\r\n for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: np.array([[1., 1.]]),\r\n m0.name: 0.1 * np.ones([1, 2]),\r\n m1.name: 0.1 * np.ones([1, 2]),\r\n m2.name: 0.1 * np.ones([1, 2]),\r\n m3.name: 0.1 * np.ones([1, 2])\r\n })\r\n self.assertEqual(len(res), 5)\r\n self.assertAllClose(res[0], [[0.24024698, 0.24024698]])\r\n # These numbers are from testBasicLSTMCell and only test c/h.\r\n self.assertAllClose(res[1], [[0.68967271, 0.68967271]])\r\n self.assertAllClose(res[2], [[0.44848421, 0.44848421]])\r\n self.assertAllClose(res[3], [[0.39897051, 0.39897051]])\r\n self.assertAllClose(res[4], [[0.24024698, 0.24024698]])\r\n\r\n def testCompatibleNames(self):\r\n with self.session(use_gpu=True, graph=ops.Graph()):\r\n cell = rnn_cell.LSTMCell(10)\r\n pcell = rnn_cell.LSTMCell(10, use_peepholes=True)\r\n inputs = [array_ops.zeros([4, 
5])] * 6\r\n rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=\"basic\")\r\n rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope=\"peephole\")\r\n basic_names = {\r\n v.name: v.get_shape()\r\n for v in variables.trainable_variables()\r\n }\r\n\r\n with self.session(use_gpu=True, graph=ops.Graph()):\r\n cell = lstm_ops.LSTMBlockCell(10)\r\n pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True)\r\n inputs = [array_ops.zeros([4, 5])] * 6\r\n rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=\"basic\")\r\n rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope=\"peephole\")\r\n block_names = {\r\n v.name: v.get_shape()\r\n for v in variables.trainable_variables()\r\n }\r\n\r\n with self.session(use_gpu=True, graph=ops.Graph()):\r\n cell = lstm_ops.LSTMBlockFusedCell(10)\r\n pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True)\r\n inputs = array_ops.stack([array_ops.zeros([4, 5])] * 6)\r\n cell(inputs, dtype=dtypes.float32, scope=\"basic/lstm_cell\")\r\n pcell(inputs, dtype=dtypes.float32, scope=\"peephole/lstm_cell\")\r\n fused_names = {\r\n v.name: v.get_shape()\r\n for v in variables.trainable_variables()\r\n }\r\n\r\n self.assertEqual(basic_names, block_names)\r\n self.assertEqual(basic_names, fused_names)\r\n\r\n def testLSTMBasicToBlockCell(self):\r\n with self.session(use_gpu=True) as sess:\r\n x = array_ops.zeros([1, 2])\r\n x_values = np.random.randn(1, 2)\r\n\r\n m0_val = 0.1 * np.ones([1, 2])\r\n m1_val = -0.1 * np.ones([1, 2])\r\n m2_val = -0.2 * np.ones([1, 2])\r\n m3_val = 0.2 * np.ones([1, 2])\r\n\r\n initializer = init_ops.random_uniform_initializer(\r\n -0.01, 0.01, seed=19890212)\r\n with variable_scope.variable_scope(\"basic\", initializer=initializer):\r\n m0 = array_ops.zeros([1, 2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)],\r\n state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: x_values,\r\n m0.name: m0_val,\r\n m1.name: m1_val,\r\n m2.name: m2_val,\r\n m3.name: m3_val\r\n })\r\n\r\n with variable_scope.variable_scope(\"block\", initializer=initializer):\r\n m0 = array_ops.zeros([1, 2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [lstm_ops.LSTMBlockCell(2)\r\n for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: x_values,\r\n m0.name: m0_val,\r\n m1.name: m1_val,\r\n m2.name: m2_val,\r\n m3.name: m3_val\r\n })\r\n\r\n self.assertEqual(len(basic_res), len(block_res))\r\n for basic, block in zip(basic_res, block_res):\r\n self.assertAllClose(basic, block)\r\n\r\n def testLSTMBasicToBlockCellPeeping(self):\r\n with self.session(use_gpu=True) as sess:\r\n x = array_ops.zeros([1, 2])\r\n x_values = np.random.randn(1, 2)\r\n\r\n m0_val = 0.1 * np.ones([1, 2])\r\n m1_val = -0.1 * np.ones([1, 2])\r\n m2_val = -0.2 * np.ones([1, 2])\r\n m3_val = 0.2 * np.ones([1, 2])\r\n\r\n initializer = init_ops.random_uniform_initializer(\r\n -0.01, 0.01, seed=19890212)\r\n with variable_scope.variable_scope(\"basic\", initializer=initializer):\r\n m0 = array_ops.zeros([1, 
2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [\r\n rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True)\r\n for _ in range(2)\r\n ],\r\n state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: x_values,\r\n m0.name: m0_val,\r\n m1.name: m1_val,\r\n m2.name: m2_val,\r\n m3.name: m3_val\r\n })\r\n\r\n with variable_scope.variable_scope(\"block\", initializer=initializer):\r\n m0 = array_ops.zeros([1, 2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [lstm_ops.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],\r\n state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: x_values,\r\n m0.name: m0_val,\r\n m1.name: m1_val,\r\n m2.name: m2_val,\r\n m3.name: m3_val\r\n })\r\n\r\n self.assertEqual(len(basic_res), len(block_res))\r\n for basic, block in zip(basic_res, block_res):\r\n self.assertAllClose(basic, block)\r\n\r\n def LSTMBasicToBlockTestHelper(self,\r\n dtype=dtypes.float32,\r\n use_peephole=False,\r\n cell_clip=None,\r\n rtol=1e-6,\r\n atol=1e-6):\r\n with self.session(use_gpu=True, graph=ops.Graph()) as sess:\r\n (basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,\r\n basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,\r\n fused_wgrads) = blocks_match(\r\n sess, use_peephole=use_peephole, dtype=dtype, cell_clip=cell_clip)\r\n\r\n self.assertAllClose(basic_outputs, block_outputs, rtol=rtol, atol=atol)\r\n self.assertAllClose(basic_grads, block_grads, rtol=rtol, atol=atol)\r\n for basic, block in zip(basic_wgrads, block_wgrads):\r\n self.assertAllClose(basic, block, rtol=rtol, atol=atol)\r\n\r\n self.assertAllClose(basic_outputs, fused_outputs, rtol=rtol, atol=atol)\r\n self.assertAllClose(basic_state, fused_state, rtol=rtol, atol=atol)\r\n self.assertAllClose(basic_grads, fused_grads, rtol=rtol, atol=atol)\r\n for basic, fused in zip(basic_wgrads, fused_wgrads):\r\n self.assertAllClose(basic, fused, rtol=rtol, atol=atol)\r\n\r\n @parameterized.named_parameters(*TEST_CASES)\r\n def testLSTMBasicToBlock(self, dtype, rtol, atol):\r\n self.LSTMBasicToBlockTestHelper(\r\n dtype, use_peephole=False, rtol=rtol, atol=atol)\r\n\r\n @parameterized.named_parameters(*TEST_CASES)\r\n def testLSTMBasicToBlockPeeping(self, dtype, rtol, atol):\r\n self.LSTMBasicToBlockTestHelper(\r\n dtype, use_peephole=True, rtol=rtol, atol=atol)\r\n\r\n @parameterized.named_parameters(*TEST_CASES)\r\n def testLSTMBasicToBlockCellClip(self, dtype, rtol, atol):\r\n self.LSTMBasicToBlockTestHelper(\r\n dtype, use_peephole=True, cell_clip=0.5, rtol=rtol, atol=atol)\r\n\r\n def testLSTMFusedSequenceLengths(self):\r\n \"\"\"Verify proper support for sequence lengths in LSTMBlockFusedCell.\"\"\"\r\n with self.session(use_gpu=True) as sess:\r\n batch_size = 3\r\n input_size = 4\r\n cell_size = 5\r\n max_sequence_length = 6\r\n\r\n inputs = []\r\n for _ in range(max_sequence_length):\r\n inp = ops.convert_to_tensor(\r\n np.random.randn(batch_size, input_size), dtype=dtypes.float32)\r\n inputs.append(inp)\r\n seq_lengths = constant_op.constant([3, 4, 5])\r\n cell_inputs = 
array_ops.stack(inputs)\r\n\r\n initializer = init_ops.random_uniform_initializer(\r\n -0.01, 0.01, seed=19890213)\r\n\r\n with variable_scope.variable_scope(\"lstm_cell\", initializer=initializer):\r\n # magic naming so that the cells pick up these variables and reuse them\r\n variable_scope.get_variable(\r\n \"kernel\",\r\n shape=[input_size + cell_size, cell_size * 4],\r\n dtype=dtypes.float32)\r\n\r\n variable_scope.get_variable(\r\n \"bias\",\r\n shape=[cell_size * 4],\r\n dtype=dtypes.float32,\r\n initializer=init_ops.zeros_initializer())\r\n\r\n cell = lstm_ops.LSTMBlockFusedCell(\r\n cell_size, cell_clip=0, use_peephole=False, reuse=True,\r\n name=\"lstm_cell\")\r\n\r\n fused_outputs_op, fused_state_op = cell(\r\n cell_inputs, dtype=dtypes.float32, sequence_length=seq_lengths)\r\n\r\n cell_vars = [\r\n v for v in variables.trainable_variables()\r\n if v.name.endswith(\"kernel\") or v.name.endswith(\"bias\")\r\n ]\r\n\r\n # Verify that state propagation works if we turn our sequence into\r\n # tiny (single-time) subsequences, i.e. unfuse the cell\r\n unfused_outputs_op = []\r\n state = None\r\n with variable_scope.variable_scope(\r\n variable_scope.get_variable_scope(), reuse=True):\r\n for i, inp in enumerate(inputs):\r\n lengths = [int(i < l) for l in seq_lengths.eval()]\r\n output, state = cell(\r\n array_ops.expand_dims(inp, 0),\r\n initial_state=state,\r\n dtype=dtypes.float32,\r\n sequence_length=lengths)\r\n unfused_outputs_op.append(output[0])\r\n unfused_outputs_op = array_ops.stack(unfused_outputs_op)\r\n\r\n sess.run([variables.global_variables_initializer()])\r\n unfused_outputs, unfused_state = sess.run([unfused_outputs_op, state[0]])\r\n unfused_grads = sess.run(\r\n gradients_impl.gradients(unfused_outputs_op, inputs))\r\n unfused_wgrads = sess.run(\r\n gradients_impl.gradients(unfused_outputs_op, cell_vars))\r\n\r\n fused_outputs, fused_state = sess.run(\r\n [fused_outputs_op, fused_state_op[0]])\r\n fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))\r\n fused_wgrads = sess.run(\r\n gradients_impl.gradients(fused_outputs_op, cell_vars))\r\n\r\n self.assertAllClose(fused_outputs, unfused_outputs)\r\n self.assertAllClose(fused_state, unfused_state)\r\n self.assertAllClose(fused_grads, unfused_grads)\r\n for fused, unfused in zip(fused_wgrads, unfused_wgrads):\r\n self.assertAllClose(fused, unfused, rtol=1e-6, atol=1e-6)\r\n\r\n#### Benchmarking.\r\n\r\n\r\nclass BenchmarkLSTMBlock(test.Benchmark):\r\n\r\n def benchmarkLSTMBlockCellFpropWithDynamicRNN(self):\r\n print(\"BlockLSTMCell forward propagation via dynamic_rnn().\")\r\n print(\"--------------------------------------------------------------\")\r\n print(\"LSTMBlockCell Seconds per inference.\")\r\n print(\"batch_size,cell_size,input_size,time_steps,use_gpu,wall_time\")\r\n iters = 10\r\n for config in benchmarking.dict_product({\r\n \"batch_size\": [1, 8, 13, 32, 67, 128],\r\n \"cell_size\": [128, 250, 512, 650, 1024, 1350],\r\n \"time_steps\": [40],\r\n \"use_gpu\": [True, False],\r\n \"dtype\": [\"float32\", \"float16\"],\r\n }):\r\n dtype = dtypes.float32 if config[\"dtype\"] == \"float32\" else dtypes.float16\r\n with ops.Graph().as_default():\r\n with benchmarking.device(use_gpu=config[\"use_gpu\"]):\r\n inputs = variable_scope.get_variable(\r\n \"x\",\r\n dtype=dtype,\r\n shape=[\r\n config[\"time_steps\"], config[\"batch_size\"],\r\n config[\"cell_size\"]\r\n ])\r\n cell = lstm_ops.LSTMBlockCell(config[\"cell_size\"], dtype=dtype)\r\n outputs = rnn.dynamic_rnn(cell, inputs, 
time_major=True, dtype=dtype)\r\n init_op = variables.global_variables_initializer()\r\n\r\n with session.Session() as sess:\r\n sess.run(init_op)\r\n wall_time = benchmarking.seconds_per_run(outputs, sess, iters)\r\n\r\n # Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable\r\n # is set, this will produce a copy-paste-able CSV file.\r\n print(\",\".join(\r\n map(str, [\r\n config[\"dtype\"], config[\"batch_size\"], config[\"cell_size\"],\r\n config[\"cell_size\"], config[\"time_steps\"], config[\"use_gpu\"],\r\n wall_time\r\n ])))\r\n benchmark_name_template = \"_\".join([\r\n \"LSTMBlockCell_fprop\", \"DT_%(dtype)s\", \"BS%(batch_size)i\",\r\n \"CS%(cell_size)i\", \"IS%(cell_size)i\", \"TS%(time_steps)i\",\r\n \"gpu_%(use_gpu)s\"\r\n ])\r\n\r\n self.report_benchmark(\r\n name=benchmark_name_template % config,\r\n iters=iters,\r\n wall_time=wall_time,\r\n extras=config)\r\n\r\n def benchmarkLSTMBlockCellBpropWithDynamicRNN(self):\r\n print(\"BlockLSTMCell backward propagation via dynamic_rnn().\")\r\n print(\"--------------------------------------------------------------\")\r\n print(\"LSTMBlockCell Seconds per inference.\")\r\n print(\"batch_size,cell_size,input_size,time_steps,use_gpu,wall_time\")\r\n iters = 10\r\n for config in benchmarking.dict_product({\r\n \"batch_size\": [1, 8, 13, 32, 67, 128],\r\n \"cell_size\": [128, 250, 512, 650, 1024, 1350],\r\n \"time_steps\": [40],\r\n \"use_gpu\": [True, False],\r\n \"dtype\": [\"float32\", \"float16\"],\r\n }):\r\n dtype = dtypes.float32 if config[\"dtype\"] == \"float32\" else dtypes.float16\r\n with ops.Graph().as_default():\r\n with benchmarking.device(use_gpu=config[\"use_gpu\"]):\r\n time_steps = config[\"time_steps\"]\r\n batch_size = config[\"batch_size\"]\r\n cell_size = input_size = config[\"cell_size\"]\r\n inputs = variable_scope.get_variable(\r\n \"x\", [time_steps, batch_size, cell_size],\r\n trainable=False,\r\n dtype=dtype)\r\n with variable_scope.variable_scope(\r\n \"rnn\", reuse=variable_scope.AUTO_REUSE):\r\n w = variable_scope.get_variable(\r\n \"rnn/lstm_cell/kernel\",\r\n shape=[input_size + cell_size, cell_size * 4],\r\n dtype=dtype)\r\n b = variable_scope.get_variable(\r\n \"rnn/lstm_cell/bias\",\r\n shape=[cell_size * 4],\r\n dtype=dtype,\r\n initializer=init_ops.zeros_initializer())\r\n cell = lstm_ops.LSTMBlockCell(cell_size, dtype=dtype)\r\n outputs = rnn.dynamic_rnn(\r\n cell, inputs, time_major=True, dtype=dtype)\r\n grads = gradients_impl.gradients(outputs, [inputs, w, b])\r\n init_op = variables.global_variables_initializer()\r\n\r\n with session.Session() as sess:\r\n sess.run(init_op)\r\n wall_time = benchmarking.seconds_per_run(grads, sess, iters)\r\n\r\n # Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable\r\n # is set, this will produce a copy-paste-able CSV file.\r\n print(\",\".join(\r\n map(str, [\r\n config[\"dtype\"], batch_size, cell_size, cell_size, time_steps,\r\n config[\"use_gpu\"], wall_time\r\n ])))\r\n benchmark_name_template = \"_\".join([\r\n \"LSTMBlockCell_bprop\", \"DT_%(dtype)s\", \"BS%(batch_size)i\",\r\n \"CS%(cell_size)i\", \"IS%(cell_size)i\", \"TS%(time_steps)i\",\r\n \"gpu_%(use_gpu)s\"\r\n ])\r\n\r\n self.report_benchmark(\r\n name=benchmark_name_template % config,\r\n iters=iters,\r\n wall_time=wall_time,\r\n extras=config)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Training related logic for Keras model in TF 2.0 context.\r\n\r\nNote that all the code under this module is under active development, please DO\r\nNOT use it unless you are really sure what you are doing.\r\n\"\"\"\r\n\r\n# pylint: disable=protected-access\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport functools\r\n\r\nfrom tensorflow.python.distribute import distribution_strategy_context\r\nfrom tensorflow.python.eager import def_function\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import tensor_util\r\nfrom tensorflow.python.framework.ops import composite_tensor\r\nfrom tensorflow.python.keras import backend\r\nfrom tensorflow.python.keras.distribute import distributed_training_utils as dist_utils\r\nfrom tensorflow.python.keras.engine import training_eager\r\nfrom tensorflow.python.keras.engine import training_utils\r\nfrom tensorflow.python.keras.utils.mode_keys import ModeKeys\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.util import nest\r\n\r\n\r\ndef _get_or_make_execution_function(model, mode):\r\n \"\"\"Makes or reuses function to run one step of distributed model execution.\"\"\"\r\n model._init_distributed_function_cache_if_not_compiled()\r\n\r\n # Use a key with 'v2' to distinguish from fall-back execution functions.\r\n key = (mode, 'v2')\r\n distributed_function = dist_utils.get_distributed_function(model, key)\r\n if distributed_function:\r\n return distributed_function\r\n\r\n distribution_function = _make_execution_function(model, mode)\r\n dist_utils.set_distributed_function(model, key, distribution_function)\r\n return distribution_function\r\n\r\n\r\ndef _make_execution_function(model, mode):\r\n \"\"\"Creates a function to run one step of distributed model execution.\"\"\"\r\n per_replica_function = _make_replica_execution_function(mode)\r\n\r\n def distributed_function(input_iterator):\r\n \"\"\"A single step of the distributed execution across replicas.\"\"\"\r\n x, y, sample_weights = _prepare_feed_values(\r\n model, input_iterator, mode)\r\n # Call `Model.{train,test,predict}_on_batch` on every replica passing\r\n # PerReplicas as arguments. On every replica inside this call, each\r\n # PerReplica object will return the value for that replica. 
The outputs\r\n # are PerReplicas too.\r\n strategy = distribution_strategy_context.get_strategy()\r\n outputs = strategy.experimental_run_v2(\r\n per_replica_function, args=(model, x, y, sample_weights))\r\n # Out of PerReplica outputs reduce or pick values to return.\r\n all_outputs = dist_utils.unwrap_output_dict(\r\n strategy, outputs, mode)\r\n return all_outputs\r\n\r\n if not model.run_eagerly:\r\n distributed_function = def_function.function(\r\n distributed_function, autograph=False)\r\n\r\n def execution_function(input_fn):\r\n # `numpy` translates Tensors to values in Eager mode.\r\n return nest.map_structure(_non_none_constant_value,\r\n distributed_function(input_fn))\r\n\r\n return execution_function\r\n\r\n\r\ndef _non_none_constant_value(v):\r\n constant_value = tensor_util.constant_value(v)\r\n return constant_value if constant_value is not None else v\r\n\r\n\r\ndef _prepare_feed_values(model, inputs, mode):\r\n \"\"\"Prepare feed values to the model execution function.\r\n\r\n Arguments:\r\n model: Model to prepare feed values for.\r\n inputs: An iterator of model inputs, targets, and sample_weights.\r\n model inputs may be lists, single values, or dicts mapping input feed\r\n names to values.\r\n mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.\r\n\r\n Returns:\r\n Feed values for the model in the given mode. This is a tuple of\r\n the structure (inputs, targets, sample_weights), where each of\r\n (tuple, targets, sample_weights) may be a python list. Single values\r\n for inputs will always be wrapped in lists.\r\n \"\"\"\r\n inputs, targets, sample_weights = _get_input_from_iterator(inputs)\r\n\r\n # When the inputs are dict, then we want to flatten it in the same order as\r\n # the input layers, such that the data are fed into the input layers in the\r\n # correct order.\r\n if isinstance(inputs, dict):\r\n inputs = [inputs[key] for key in model._feed_input_names]\r\n else:\r\n inputs = training_utils.ModelInputs(inputs).as_list()\r\n\r\n if mode == ModeKeys.PREDICT:\r\n sample_weights = []\r\n targets = []\r\n\r\n ins = [inputs, targets, sample_weights]\r\n return tuple(ins)\r\n\r\n\r\ndef _get_input_from_iterator(iterator):\r\n \"\"\"Get elements from the iterator and verify the input shape and type.\"\"\"\r\n next_element = next(iterator)\r\n\r\n if (tensor_util.is_tensor(next_element) or\r\n isinstance(next_element, (dict, composite_tensor.CompositeTensor))):\r\n next_element = [next_element]\r\n if len(next_element) == 1:\r\n x, = next_element\r\n y = None\r\n sample_weights = None\r\n elif len(next_element) == 2:\r\n x, y = next_element\r\n sample_weights = None\r\n else:\r\n x, y, sample_weights = next_element\r\n\r\n # Validate that all the elements in x and y are of the same type and shape.\r\n dist_utils.validate_distributed_dataset_inputs(\r\n distribution_strategy_context.get_strategy(), x, y, sample_weights)\r\n return x, y, sample_weights\r\n\r\n\r\ndef _make_replica_execution_function(mode):\r\n \"\"\"A single step of the distributed execution on a replica.\"\"\"\r\n if mode == ModeKeys.TRAIN:\r\n func = train_on_batch\r\n elif mode == ModeKeys.TEST:\r\n func = test_on_batch\r\n else:\r\n def _predict_on_batch(model, x, y=None, sample_weights=None):\r\n del y, sample_weights\r\n return predict_on_batch(model, x)\r\n\r\n func = _predict_on_batch\r\n\r\n if mode != ModeKeys.PREDICT:\r\n # `reset_metrics` is set to False to maintain stateful metrics across\r\n # batch-level calls.\r\n func = functools.partial(func, reset_metrics=False)\r\n\r\n 
return func\r\n\r\n\r\ndef _prepare_model_with_inputs(model, dataset):\r\n \"\"\"Use the data from the adapter to config the model.\r\n\r\n Model need to be properly configured before training, eg build with inputs, or\r\n compile with inputs for subclass model.\r\n\r\n Args:\r\n model: a Keras model object.\r\n dataset: a eager dataset instance where the data will be extracted.\r\n \"\"\"\r\n if not model.inputs:\r\n inputs, target, _ = model._build_model_with_inputs(dataset, targets=None)\r\n else:\r\n inputs, target, _ = _get_input_from_iterator(iter(dataset))\r\n\r\n if not model._is_compiled and model.optimizer:\r\n model._compile_from_inputs(inputs, target, dataset, None)\r\n\r\n if target is not None:\r\n training_utils.prepare_sample_weight_modes(model._training_endpoints,\r\n model.sample_weight_mode)\r\n\r\n\r\ndef train_on_batch(\r\n model,\r\n x,\r\n y=None,\r\n sample_weight=None,\r\n class_weight=None,\r\n reset_metrics=True):\r\n \"\"\"Runs a single gradient update on a single batch of data.\r\n\r\n Arguments:\r\n model: The model to train.\r\n x: Input data. It could be:\r\n - A Numpy array (or array-like), or a list of arrays\r\n (in case the model has multiple inputs).\r\n - A TensorFlow tensor, or a list of tensors\r\n (in case the model has multiple inputs).\r\n - A dict mapping input names to the corresponding array/tensors,\r\n if the model has named inputs.\r\n - A `tf.data` dataset.\r\n y: Target data. Like the input data `x`, it could be either Numpy\r\n array(s) or TensorFlow tensor(s). It should be consistent with `x`\r\n (you cannot have Numpy inputs and tensor targets, or inversely). If\r\n `x` is a dataset `y` should not be specified\r\n (since targets will be obtained from the iterator).\r\n sample_weight: Optional array of the same length as x, containing\r\n weights to apply to the model's loss for each sample. In the case of\r\n temporal data, you can pass a 2D array with shape (samples,\r\n sequence_length), to apply a different weight to every timestep of\r\n every sample. In this case you should make sure to specify\r\n sample_weight_mode=\"temporal\" in compile(). This argument is not\r\n supported when `x` is a dataset.\r\n class_weight: Optional dictionary mapping class indices (integers) to a\r\n weight (float) to apply to the model's loss for the samples from this\r\n class during training. This can be useful to tell the model to \"pay\r\n more attention\" to samples from an under-represented class.\r\n reset_metrics: If `True`, the metrics returned will be only for this\r\n batch. If `False`, the metrics will be statefully accumulated across\r\n batches.\r\n\r\n Returns:\r\n Scalar training loss\r\n (if the model has a single output and no metrics)\r\n or list of scalars (if the model has multiple outputs\r\n and/or metrics). 
The attribute `model.metrics_names` will give you\r\n the display labels for the scalar outputs.\r\n\r\n Raises:\r\n ValueError: In case of invalid user-provided arguments.\r\n \"\"\"\r\n model._assert_compile_was_called()\r\n\r\n # TODO(scottzhu): Standardization should happen in the data handlers,\r\n ## not on a per batch basis in the *_on_batch methods\r\n # Validate and standardize user data.\r\n x, y, sample_weights = model._standardize_user_data(\r\n x, y, sample_weight=sample_weight, class_weight=class_weight,\r\n extract_tensors_from_dataset=True)\r\n batch_size = array_ops.shape(nest.flatten(x, expand_composites=True)[0])[0]\r\n # If `model._distribution_strategy` is True, then we are in a replica context\r\n # at this point because of the check above. `train_on_batch` is being run\r\n # for each replica by `model._distribution_strategy` and the same code path\r\n # as Eager is expected to be taken.\r\n outputs = training_eager.train_on_batch(\r\n model,\r\n x,\r\n y,\r\n sample_weights=sample_weights,\r\n output_loss_metrics=model._output_loss_metrics)\r\n\r\n if reset_metrics:\r\n model.reset_metrics()\r\n\r\n outputs['batch_size'] = math_ops.cast(batch_size, dtypes.int64)\r\n return outputs\r\n\r\n\r\ndef test_on_batch(model, x, y=None, sample_weight=None, reset_metrics=True):\r\n \"\"\"Test the model on a single batch of samples.\r\n\r\n Arguments:\r\n model: The model to test.\r\n x: Input data. It could be:\r\n - A Numpy array (or array-like), or a list of arrays\r\n (in case the model has multiple inputs).\r\n - A TensorFlow tensor, or a list of tensors\r\n (in case the model has multiple inputs).\r\n - A dict mapping input names to the corresponding array/tensors,\r\n if the model has named inputs.\r\n - A `tf.data` dataset.\r\n y: Target data. Like the input data `x`,\r\n it could be either Numpy array(s) or TensorFlow tensor(s).\r\n It should be consistent with `x` (you cannot have Numpy inputs and\r\n tensor targets, or inversely). If `x` is a dataset,\r\n `y` should not be specified\r\n (since targets will be obtained from the iterator).\r\n sample_weight: Optional array of the same length as x, containing\r\n weights to apply to the model's loss for each sample.\r\n In the case of temporal data, you can pass a 2D array\r\n with shape (samples, sequence_length),\r\n to apply a different weight to every timestep of every sample.\r\n In this case you should make sure to specify\r\n sample_weight_mode=\"temporal\" in compile(). This argument is not\r\n supported when `x` is a dataset.\r\n reset_metrics: If `True`, the metrics returned will be only for this\r\n batch. If `False`, the metrics will be statefully accumulated across\r\n batches.\r\n\r\n Returns:\r\n Scalar test loss (if the model has a single output and no metrics)\r\n or list of scalars (if the model has multiple outputs\r\n and/or metrics). 
The attribute `model.metrics_names` will give you\r\n the display labels for the scalar outputs.\r\n\r\n Raises:\r\n ValueError: In case of invalid user-provided arguments.\r\n \"\"\"\r\n model._assert_compile_was_called()\r\n\r\n # TODO(scottzhu): Standardization should happen in the data handlers,\r\n ## not on a per batch basis in the *_on_batch methods\r\n # Validate and standardize user data.\r\n x, y, sample_weights = model._standardize_user_data(\r\n x, y, sample_weight=sample_weight, extract_tensors_from_dataset=True)\r\n\r\n batch_size = array_ops.shape(nest.flatten(x, expand_composites=True)[0])[0]\r\n outputs = training_eager.test_on_batch(\r\n model,\r\n x,\r\n y,\r\n sample_weights=sample_weights,\r\n output_loss_metrics=model._output_loss_metrics)\r\n\r\n if reset_metrics:\r\n model.reset_metrics()\r\n\r\n outputs['batch_size'] = math_ops.cast(batch_size, dtypes.int64)\r\n return outputs\r\n\r\n\r\ndef predict_on_batch(model, x):\r\n \"\"\"Returns predictions for a single batch of samples.\r\n\r\n Arguments:\r\n model: The model to predict with.\r\n x: Input data. It could be:\r\n - A Numpy array (or array-like), or a list of arrays\r\n (in case the model has multiple inputs).\r\n - A TensorFlow tensor, or a list of tensors\r\n (in case the model has multiple inputs).\r\n - A `tf.data` dataset.\r\n\r\n Returns:\r\n Numpy array(s) of predictions.\r\n\r\n Raises:\r\n ValueError: In case of mismatch between given number of inputs and\r\n expectations of the model.\r\n \"\"\"\r\n # TODO(scottzhu): Standardization should happen in the data handlers,\r\n ## not on a per batch basis in the *_on_batch methods\r\n # Validate and standardize user data.\r\n inputs, _, _ = model._standardize_user_data(\r\n x, extract_tensors_from_dataset=True)\r\n\r\n # If `model._distribution_strategy` is True, then we are in a replica context\r\n # at this point.\r\n inputs = training_utils.cast_if_floating_dtype(inputs)\r\n if isinstance(inputs, collections.Sequence):\r\n # Unwrap lists with only one input, as we do when training on batch\r\n if len(inputs) == 1:\r\n inputs = inputs[0]\r\n\r\n with backend.eager_learning_phase_scope(0):\r\n return model(inputs) # pylint: disable=not-callable\r\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"tensorboard_logging provides logging that is also written to the events file.\r\n\r\nAny messages logged via this module will be logged both via the platform logging\r\nmechanism and to the SummaryWriter set via `set_summary_writer`. 
This is useful\r\nfor logging messages that you might want to be visible from inside TensorBoard\r\nor that should be permanently associated with the training session.\r\n\r\nYou can use this just like the logging module:\r\n\r\n>>> tensorboard_logging.set_summary_writer(summary_writer)\r\n>>> tensorboard_logging.info(\"my %s\", \"message\")\r\n>>> tensorboard_logging.log(tensorboard_logging.WARN, \"something\")\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport time\r\n\r\nfrom tensorflow.core.util import event_pb2\r\nfrom tensorflow.python.platform import tf_logging as logging\r\n\r\nDEBUG = 'DEBUG'\r\nINFO = 'INFO'\r\nWARN = 'WARN'\r\nERROR = 'ERROR'\r\nFATAL = 'FATAL'\r\n\r\n# Messages with levels below this verbosity will not be logged.\r\n_verbosity = WARN\r\n\r\n# A value meaning 'not set yet' so we can use None to mean 'user actively told\r\n# us they don't want a SummaryWriter'.\r\n_sentinel_summary_writer = object()\r\n\r\n# The SummaryWriter instance to use when logging, or None to not log, or\r\n# _sentinel_summary_writer to indicate that the user hasn't called\r\n# set_summary_writer yet.\r\n_summary_writer = _sentinel_summary_writer\r\n\r\n# Map from the tensorboard_logging logging enum values to the proto's enum\r\n# values.\r\n_LEVEL_PROTO_MAP = {\r\n DEBUG: event_pb2.LogMessage.DEBUGGING,\r\n INFO: event_pb2.LogMessage.INFO,\r\n WARN: event_pb2.LogMessage.WARN,\r\n ERROR: event_pb2.LogMessage.ERROR,\r\n FATAL: event_pb2.LogMessage.FATAL,\r\n}\r\n\r\n# Map from the tensorboard_logging module levels to the logging module levels.\r\n_PLATFORM_LOGGING_LEVEL_MAP = {\r\n DEBUG: logging.DEBUG,\r\n INFO: logging.INFO,\r\n WARN: logging.WARN,\r\n ERROR: logging.ERROR,\r\n FATAL: logging.FATAL\r\n}\r\n\r\n\r\ndef get_verbosity():\r\n return _verbosity\r\n\r\n\r\ndef set_verbosity(verbosity):\r\n _check_verbosity(verbosity)\r\n global _verbosity\r\n _verbosity = verbosity\r\n\r\n\r\ndef _check_verbosity(verbosity):\r\n if verbosity not in _LEVEL_PROTO_MAP:\r\n raise ValueError('Level %s is not a valid tensorboard_logging level' %\r\n verbosity)\r\n\r\n\r\ndef set_summary_writer(summary_writer):\r\n \"\"\"Sets the summary writer that events will be logged to.\r\n\r\n Calling any logging methods inside this module without calling this method\r\n will fail. If you don't want to log, call `set_summary_writer(None)`.\r\n\r\n Args:\r\n summary_writer: Either a SummaryWriter or None. None will cause messages not\r\n to be logged to any SummaryWriter, but they will still be passed to the\r\n platform logging module.\r\n \"\"\"\r\n global _summary_writer\r\n _summary_writer = summary_writer\r\n\r\n\r\ndef _clear_summary_writer():\r\n \"\"\"Makes all subsequent log invocations error.\r\n\r\n This is only used for testing. If you want to disable TensorBoard logging,\r\n call `set_summary_writer(None)` instead.\r\n \"\"\"\r\n global _summary_writer\r\n _summary_writer = _sentinel_summary_writer\r\n\r\n\r\ndef log(level, message, *args):\r\n \"\"\"Conditionally logs `message % args` at the level `level`.\r\n\r\n Note that tensorboard_logging verbosity and logging verbosity are separate;\r\n the message will always be passed through to the logging module regardless of\r\n whether it passes the tensorboard_logging verbosity check.\r\n\r\n Args:\r\n level: The verbosity level to use. 
Must be one of\r\n tensorboard_logging.{DEBUG, INFO, WARN, ERROR, FATAL}.\r\n message: The message template to use.\r\n *args: Arguments to interpolate to the message template, if any.\r\n\r\n Raises:\r\n ValueError: If `level` is not a valid logging level.\r\n RuntimeError: If the `SummaryWriter` to use has not been set.\r\n \"\"\"\r\n if _summary_writer is _sentinel_summary_writer:\r\n raise RuntimeError('Must call set_summary_writer before doing any '\r\n 'logging from tensorboard_logging')\r\n _check_verbosity(level)\r\n proto_level = _LEVEL_PROTO_MAP[level]\r\n if proto_level >= _LEVEL_PROTO_MAP[_verbosity]:\r\n log_message = event_pb2.LogMessage(level=proto_level,\r\n message=message % args)\r\n event = event_pb2.Event(wall_time=time.time(), log_message=log_message)\r\n\r\n if _summary_writer:\r\n _summary_writer.add_event(event)\r\n\r\n logging.log(_PLATFORM_LOGGING_LEVEL_MAP[level], message, *args)\r\n\r\n\r\ndef debug(message, *args):\r\n log(DEBUG, message, *args)\r\n\r\n\r\ndef info(message, *args):\r\n log(INFO, message, *args)\r\n\r\n\r\ndef warn(message, *args):\r\n log(WARN, message, *args)\r\n\r\n\r\ndef error(message, *args):\r\n log(ERROR, message, *args)\r\n\r\n\r\ndef fatal(message, *args):\r\n log(FATAL, message, *args)\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n# pylint: disable=g-import-not-at-top\r\n\"\"\"Utilities for file download and caching.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom abc import abstractmethod\r\nfrom contextlib import closing\r\nimport gc\r\nimport hashlib\r\nimport multiprocessing\r\nfrom multiprocessing.pool import ThreadPool\r\nimport os\r\nimport random\r\nimport shutil\r\nimport signal\r\nimport sys\r\nimport tarfile\r\nimport threading\r\nimport time\r\nimport weakref\r\nimport zipfile\r\n\r\nimport numpy as np\r\nimport six\r\nfrom six.moves.urllib.error import HTTPError\r\nfrom six.moves.urllib.error import URLError\r\nfrom six.moves.urllib.request import urlopen\r\n\r\nfrom tensorflow.python.keras.utils.generic_utils import Progbar\r\nfrom tensorflow.python.util import tf_inspect\r\nfrom tensorflow.python.util.tf_export import keras_export\r\n\r\n\r\ntry:\r\n import queue\r\nexcept ImportError:\r\n import Queue as queue\r\n\r\n\r\nif sys.version_info[0] == 2:\r\n\r\n def urlretrieve(url, filename, reporthook=None, data=None):\r\n \"\"\"Replacement for `urlretrive` for Python 2.\r\n\r\n Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy\r\n `urllib` module, known to have issues with proxy management.\r\n\r\n Arguments:\r\n url: url to retrieve.\r\n filename: where to store the retrieved data locally.\r\n reporthook: a hook function that will be called once\r\n on establishment of the network connection and once\r\n after each block read 
thereafter.\r\n The hook will be passed three arguments;\r\n a count of blocks transferred so far,\r\n a block size in bytes, and the total size of the file.\r\n data: `data` argument passed to `urlopen`.\r\n \"\"\"\r\n\r\n def chunk_read(response, chunk_size=8192, reporthook=None):\r\n content_type = response.info().get('Content-Length')\r\n total_size = -1\r\n if content_type is not None:\r\n total_size = int(content_type.strip())\r\n count = 0\r\n while True:\r\n chunk = response.read(chunk_size)\r\n count += 1\r\n if reporthook is not None:\r\n reporthook(count, chunk_size, total_size)\r\n if chunk:\r\n yield chunk\r\n else:\r\n break\r\n\r\n response = urlopen(url, data)\r\n with open(filename, 'wb') as fd:\r\n for chunk in chunk_read(response, reporthook=reporthook):\r\n fd.write(chunk)\r\nelse:\r\n from six.moves.urllib.request import urlretrieve\r\n\r\n\r\ndef is_generator_or_sequence(x):\r\n \"\"\"Check if `x` is a Keras generator type.\"\"\"\r\n return tf_inspect.isgenerator(x) or isinstance(x, Sequence)\r\n\r\n\r\ndef _extract_archive(file_path, path='.', archive_format='auto'):\r\n \"\"\"Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.\r\n\r\n Arguments:\r\n file_path: path to the archive file\r\n path: path to extract the archive file\r\n archive_format: Archive format to try for extracting the file.\r\n Options are 'auto', 'tar', 'zip', and None.\r\n 'tar' includes tar, tar.gz, and tar.bz files.\r\n The default 'auto' is ['tar', 'zip'].\r\n None or an empty list will return no matches found.\r\n\r\n Returns:\r\n True if a match was found and an archive extraction was completed,\r\n False otherwise.\r\n \"\"\"\r\n if archive_format is None:\r\n return False\r\n if archive_format == 'auto':\r\n archive_format = ['tar', 'zip']\r\n if isinstance(archive_format, six.string_types):\r\n archive_format = [archive_format]\r\n\r\n for archive_type in archive_format:\r\n if archive_type == 'tar':\r\n open_fn = tarfile.open\r\n is_match_fn = tarfile.is_tarfile\r\n if archive_type == 'zip':\r\n open_fn = zipfile.ZipFile\r\n is_match_fn = zipfile.is_zipfile\r\n\r\n if is_match_fn(file_path):\r\n with open_fn(file_path) as archive:\r\n try:\r\n archive.extractall(path)\r\n except (tarfile.TarError, RuntimeError, KeyboardInterrupt):\r\n if os.path.exists(path):\r\n if os.path.isfile(path):\r\n os.remove(path)\r\n else:\r\n shutil.rmtree(path)\r\n raise\r\n return True\r\n return False\r\n\r\n\r\n@keras_export('keras.utils.get_file')\r\ndef get_file(fname,\r\n origin,\r\n untar=False,\r\n md5_hash=None,\r\n file_hash=None,\r\n cache_subdir='datasets',\r\n hash_algorithm='auto',\r\n extract=False,\r\n archive_format='auto',\r\n cache_dir=None):\r\n \"\"\"Downloads a file from a URL if it not already in the cache.\r\n\r\n By default the file at the url `origin` is downloaded to the\r\n cache_dir `~/.keras`, placed in the cache_subdir `datasets`,\r\n and given the filename `fname`. The final location of a file\r\n `example.txt` would therefore be `~/.keras/datasets/example.txt`.\r\n\r\n Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.\r\n Passing a hash will verify the file after download. The command line\r\n programs `shasum` and `sha256sum` can compute the hash.\r\n\r\n Arguments:\r\n fname: Name of the file. 
If an absolute path `/path/to/file.txt` is\r\n specified the file will be saved at that location.\r\n origin: Original URL of the file.\r\n untar: Deprecated in favor of 'extract'.\r\n boolean, whether the file should be decompressed\r\n md5_hash: Deprecated in favor of 'file_hash'.\r\n md5 hash of the file for verification\r\n file_hash: The expected hash string of the file after download.\r\n The sha256 and md5 hash algorithms are both supported.\r\n cache_subdir: Subdirectory under the Keras cache dir where the file is\r\n saved. If an absolute path `/path/to/folder` is\r\n specified the file will be saved at that location.\r\n hash_algorithm: Select the hash algorithm to verify the file.\r\n options are 'md5', 'sha256', and 'auto'.\r\n The default 'auto' detects the hash algorithm in use.\r\n extract: True tries extracting the file as an Archive, like tar or zip.\r\n archive_format: Archive format to try for extracting the file.\r\n Options are 'auto', 'tar', 'zip', and None.\r\n 'tar' includes tar, tar.gz, and tar.bz files.\r\n The default 'auto' is ['tar', 'zip'].\r\n None or an empty list will return no matches found.\r\n cache_dir: Location to store cached files, when None it\r\n defaults to the [Keras\r\n Directory](/faq/#where-is-the-keras-configuration-filed-stored).\r\n\r\n Returns:\r\n Path to the downloaded file\r\n \"\"\"\r\n if cache_dir is None:\r\n cache_dir = os.path.join(os.path.expanduser('~'), '.keras')\r\n if md5_hash is not None and file_hash is None:\r\n file_hash = md5_hash\r\n hash_algorithm = 'md5'\r\n datadir_base = os.path.expanduser(cache_dir)\r\n if not os.access(datadir_base, os.W_OK):\r\n datadir_base = os.path.join('/tmp', '.keras')\r\n datadir = os.path.join(datadir_base, cache_subdir)\r\n if not os.path.exists(datadir):\r\n os.makedirs(datadir)\r\n\r\n if untar:\r\n untar_fpath = os.path.join(datadir, fname)\r\n fpath = untar_fpath + '.tar.gz'\r\n else:\r\n fpath = os.path.join(datadir, fname)\r\n\r\n download = False\r\n if os.path.exists(fpath):\r\n # File found; verify integrity if a hash was provided.\r\n if file_hash is not None:\r\n if not validate_file(fpath, file_hash, algorithm=hash_algorithm):\r\n print('A local file was found, but it seems to be '\r\n 'incomplete or outdated because the ' + hash_algorithm +\r\n ' file hash does not match the original value of ' + file_hash +\r\n ' so we will re-download the data.')\r\n download = True\r\n else:\r\n download = True\r\n\r\n if download:\r\n print('Downloading data from', origin)\r\n\r\n class ProgressTracker(object):\r\n # Maintain progbar for the lifetime of download.\r\n # This design was chosen for Python 2.7 compatibility.\r\n progbar = None\r\n\r\n def dl_progress(count, block_size, total_size):\r\n if ProgressTracker.progbar is None:\r\n if total_size == -1:\r\n total_size = None\r\n ProgressTracker.progbar = Progbar(total_size)\r\n else:\r\n ProgressTracker.progbar.update(count * block_size)\r\n\r\n error_msg = 'URL fetch failure on {}: {} -- {}'\r\n try:\r\n try:\r\n urlretrieve(origin, fpath, dl_progress)\r\n except HTTPError as e:\r\n raise Exception(error_msg.format(origin, e.code, e.msg))\r\n except URLError as e:\r\n raise Exception(error_msg.format(origin, e.errno, e.reason))\r\n except (Exception, KeyboardInterrupt) as e:\r\n if os.path.exists(fpath):\r\n os.remove(fpath)\r\n raise\r\n ProgressTracker.progbar = None\r\n\r\n if untar:\r\n if not os.path.exists(untar_fpath):\r\n _extract_archive(fpath, datadir, archive_format='tar')\r\n return untar_fpath\r\n\r\n if extract:\r\n 
_extract_archive(fpath, datadir, archive_format)\r\n\r\n return fpath\r\n\r\n\r\ndef _hash_file(fpath, algorithm='sha256', chunk_size=65535):\r\n \"\"\"Calculates a file sha256 or md5 hash.\r\n\r\n Example:\r\n\r\n ```python\r\n >>> from keras.data_utils import _hash_file\r\n >>> _hash_file('/path/to/file.zip')\r\n 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'\r\n ```\r\n\r\n Arguments:\r\n fpath: path to the file being validated\r\n algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.\r\n The default 'auto' detects the hash algorithm in use.\r\n chunk_size: Bytes to read at a time, important for large files.\r\n\r\n Returns:\r\n The file hash\r\n \"\"\"\r\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64):\r\n hasher = hashlib.sha256()\r\n else:\r\n hasher = hashlib.md5()\r\n\r\n with open(fpath, 'rb') as fpath_file:\r\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\r\n hasher.update(chunk)\r\n\r\n return hasher.hexdigest()\r\n\r\n\r\ndef validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\r\n \"\"\"Validates a file against a sha256 or md5 hash.\r\n\r\n Arguments:\r\n fpath: path to the file being validated\r\n file_hash: The expected hash string of the file.\r\n The sha256 and md5 hash algorithms are both supported.\r\n algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.\r\n The default 'auto' detects the hash algorithm in use.\r\n chunk_size: Bytes to read at a time, important for large files.\r\n\r\n Returns:\r\n Whether the file is valid\r\n \"\"\"\r\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):\r\n hasher = 'sha256'\r\n else:\r\n hasher = 'md5'\r\n\r\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n@keras_export('keras.utils.Sequence')\r\nclass Sequence(object):\r\n \"\"\"Base object for fitting to a sequence of data, such as a dataset.\r\n\r\n Every `Sequence` must implement the `__getitem__` and the `__len__` methods.\r\n If you want to modify your dataset between epochs you may implement\r\n `on_epoch_end`.\r\n The method `__getitem__` should return a complete batch.\r\n\r\n Notes:\r\n\r\n `Sequence` are a safer way to do multiprocessing. 
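`get_file` caches a download under `~/.keras` (falling back to `/tmp/.keras`), optionally verifies it with `validate_file`, and can unpack archives via `_extract_archive`. A hypothetical usage sketch against the public `tf.keras.utils.get_file` export; the URL and hash below are placeholders, not real values:

```python
import tensorflow as tf

# Placeholder origin and hash, shown only to illustrate the parameters.
path = tf.keras.utils.get_file(
    fname='example.tar.gz',
    origin='https://example.com/example.tar.gz',
    file_hash='0' * 64,      # 64 hex characters, so sha256 is assumed
    extract=True,            # unpack the archive next to the cached file
    cache_subdir='datasets')
print(path)                  # e.g. ~/.keras/datasets/example.tar.gz
```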
This structure guarantees\r\n that the network will only train once\r\n on each sample per epoch which is not the case with generators.\r\n\r\n Examples:\r\n\r\n ```python\r\n from skimage.io import imread\r\n from skimage.transform import resize\r\n import numpy as np\r\n import math\r\n\r\n # Here, `x_set` is list of path to the images\r\n # and `y_set` are the associated classes.\r\n\r\n class CIFAR10Sequence(Sequence):\r\n\r\n def __init__(self, x_set, y_set, batch_size):\r\n self.x, self.y = x_set, y_set\r\n self.batch_size = batch_size\r\n\r\n def __len__(self):\r\n return math.ceil(len(self.x) / self.batch_size)\r\n\r\n def __getitem__(self, idx):\r\n batch_x = self.x[idx * self.batch_size:(idx + 1) *\r\n self.batch_size]\r\n batch_y = self.y[idx * self.batch_size:(idx + 1) *\r\n self.batch_size]\r\n\r\n return np.array([\r\n resize(imread(file_name), (200, 200))\r\n for file_name in batch_x]), np.array(batch_y)\r\n ```\r\n \"\"\"\r\n\r\n @abstractmethod\r\n def __getitem__(self, index):\r\n \"\"\"Gets batch at position `index`.\r\n\r\n Arguments:\r\n index: position of the batch in the Sequence.\r\n\r\n Returns:\r\n A batch\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def __len__(self):\r\n \"\"\"Number of batch in the Sequence.\r\n\r\n Returns:\r\n The number of batches in the Sequence.\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n def on_epoch_end(self):\r\n \"\"\"Method called at the end of every epoch.\r\n \"\"\"\r\n pass\r\n\r\n def __iter__(self):\r\n \"\"\"Create a generator that iterate over the Sequence.\"\"\"\r\n for item in (self[i] for i in range(len(self))):\r\n yield item\r\n\r\n\r\ndef iter_sequence_infinite(seq):\r\n \"\"\"Iterates indefinitely over a Sequence.\r\n\r\n Arguments:\r\n seq: Sequence instance.\r\n\r\n Yields:\r\n Batches of data from the Sequence.\r\n \"\"\"\r\n while True:\r\n for item in seq:\r\n yield item\r\n\r\n\r\n# Global variables to be shared across processes\r\n_SHARED_SEQUENCES = {}\r\n# We use a Value to provide unique id to different processes.\r\n_SEQUENCE_COUNTER = None\r\n\r\n\r\n# Because multiprocessing pools are inherently unsafe, starting from a clean\r\n# state can be essential to avoiding deadlocks. In order to accomplish this, we\r\n# need to be able to check on the status of Pools that we create.\r\n_DATA_POOLS = weakref.WeakSet()\r\n_WORKER_ID_QUEUE = None # Only created if needed.\r\n_WORKER_IDS = set()\r\n\r\n\r\ndef get_worker_id_queue():\r\n \"\"\"Lazily create the queue to track worker ids.\"\"\"\r\n global _WORKER_ID_QUEUE\r\n if _WORKER_ID_QUEUE is None:\r\n _WORKER_ID_QUEUE = multiprocessing.Queue()\r\n return _WORKER_ID_QUEUE\r\n\r\n\r\ndef init_pool(seqs):\r\n global _SHARED_SEQUENCES\r\n _SHARED_SEQUENCES = seqs\r\n\r\n\r\n@keras_export('keras.experimental.terminate_keras_multiprocessing_pools')\r\ndef terminate_keras_multiprocessing_pools(grace_period=0.1, use_sigkill=False):\r\n \"\"\"Destroy Keras' multiprocessing pools to prevent deadlocks.\r\n\r\n In general multiprocessing.Pool can interact quite badly with other, seemingly\r\n unrelated, parts of a codebase due to Pool's reliance on fork. This method\r\n cleans up all pools which are known to belong to Keras (and thus can be safely\r\n terminated).\r\n\r\n Args:\r\n grace_period: Time (in seconds) to wait for process cleanup to propagate.\r\n use_sigkill: Boolean of whether or not to perform a cleanup pass using\r\n SIGKILL.\r\n\r\n Returns:\r\n A list of human readable strings describing all issues encountered. 
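Besides the image-loading example embedded in the docstring, a `Sequence` only has to implement `__len__` and `__getitem__`; `__iter__` then walks the batches in index order and `iter_sequence_infinite` repeats them. A toy sketch (the `ToyBatches` class is hypothetical):

```python
import numpy as np
import tensorflow as tf

class ToyBatches(tf.keras.utils.Sequence):
  """Four synthetic (inputs, targets) batches, purely illustrative."""

  def __len__(self):
    return 4

  def __getitem__(self, idx):
    x = np.full((2, 3), idx, dtype=np.float32)
    y = np.array([idx, idx], dtype=np.int32)
    return x, y

seq = ToyBatches()
for x, y in seq:            # one pass over the 4 batches, in index order
  print(x.shape, y)
```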
It is up\r\n to the caller to decide whether to treat this as an error condition.\r\n \"\"\"\r\n errors = []\r\n\r\n # First cleanup the pools spawned by Keras. If we start killing workers and\r\n # a parent pool is still alive it will just spawn replacements which we don't\r\n # want.\r\n gc.collect()\r\n for pool in _DATA_POOLS:\r\n pool.close()\r\n pool.terminate()\r\n # We do not join the pool, because that would wait forever if a worker\r\n # refused to exit.\r\n\r\n # Finally, delete our reference to the pool so that we do not block garbage\r\n # collection.\r\n del pool\r\n\r\n # If there were any pools, sleep for a small grace period to allow everything\r\n # to finalize.\r\n if _DATA_POOLS:\r\n time.sleep(grace_period)\r\n\r\n # Now we kill any workers which are still alive. However we must compare\r\n # the worker identifier to the set of identifiers which are known to have been\r\n # spawned by pools belonging to Keras to avoid deleting unrelated workers.\r\n # First we call the .terminate() method of a worker, and then if it still\r\n # persists we directly send a signal to the process. Certain worker tasks may\r\n # be able to gracefully handle shutdown, so we send a SIGTERM and then\r\n # optionally follow up with a SIGKILL.\r\n visited_workers = set()\r\n cleanup_passes = ['.terminate', 'SIGTERM']\r\n if use_sigkill:\r\n cleanup_passes.append('SIGKILL')\r\n cleanup_passes.append('log')\r\n\r\n for cleanup_pass in cleanup_passes:\r\n while True:\r\n # In rare cases, queue.qsize() overestimates the number of elements. This\r\n # loop is designed to be more robust.\r\n try:\r\n _WORKER_IDS.add(get_worker_id_queue().get_nowait())\r\n except queue.Empty:\r\n break\r\n\r\n gc.collect()\r\n workers_terminated_this_pass = False\r\n for worker in multiprocessing.active_children():\r\n ident = worker.ident\r\n if ident in _WORKER_IDS and worker.is_alive():\r\n try:\r\n if cleanup_pass == '.terminate':\r\n # First we ask nicely.\r\n worker.terminate()\r\n worker.join(timeout=grace_period)\r\n visited_workers.add(ident)\r\n workers_terminated_this_pass = True\r\n elif cleanup_pass in ('SIGTERM', 'SIGKILL'):\r\n # Then we ask increasingly tersely.\r\n os.kill(worker.pid, signal.SIGKILL if cleanup_pass == 'SIGKILL'\r\n else signal.SIGTERM)\r\n workers_terminated_this_pass = True\r\n\r\n elif cleanup_pass == 'log':\r\n # And finally we give up and log the failure.\r\n errors.append('worker still alive: {}, pid={}, hash={}'\r\n .format(worker.name, worker.pid, hash(worker)))\r\n\r\n except OSError:\r\n # Worker exited since the start of this loop.\r\n pass\r\n\r\n if workers_terminated_this_pass:\r\n # There can be a small propagation delay between worker destruction and\r\n # workers reporting False for is_alive and no longer appearing in the\r\n # list of active children. Once again, we sleep for a small grace period.\r\n # This prevents false positives from workers which are simply still in the\r\n # process of spinning down.\r\n time.sleep(grace_period)\r\n\r\n # Finally we remove the visited worker ids to handle the edge case that a\r\n # pid is reused.\r\n _WORKER_IDS.difference_update(visited_workers)\r\n\r\n gc.collect()\r\n for pool in _DATA_POOLS:\r\n errors.append('pool still exists: {}, hash={}'.format(pool, hash(pool)))\r\n\r\n return errors\r\n\r\n\r\ndef get_index(uid, i):\r\n \"\"\"Get the value from the Sequence `uid` at index `i`.\r\n\r\n To allow multiple Sequences to be used at the same time, we use `uid` to\r\n get a specific one. 
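The pool-cleanup helper is exported as `tf.keras.experimental.terminate_keras_multiprocessing_pools` in the versions that carry this file; a hedged usage sketch, e.g. after interrupting a `fit()` that ran with `use_multiprocessing=True`:

```python
import tensorflow as tf

issues = tf.keras.experimental.terminate_keras_multiprocessing_pools(
    grace_period=0.1, use_sigkill=False)
for msg in issues:          # an empty list means every pool and worker exited
  print(msg)
```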
A single Sequence would cause the validation to\r\n overwrite the training Sequence.\r\n\r\n Arguments:\r\n uid: int, Sequence identifier\r\n i: index\r\n\r\n Returns:\r\n The value at index `i`.\r\n \"\"\"\r\n return _SHARED_SEQUENCES[uid][i]\r\n\r\n\r\n@keras_export('keras.utils.SequenceEnqueuer')\r\nclass SequenceEnqueuer(object):\r\n \"\"\"Base class to enqueue inputs.\r\n\r\n The task of an Enqueuer is to use parallelism to speed up preprocessing.\r\n This is done with processes or threads.\r\n\r\n Example:\r\n\r\n ```python\r\n enqueuer = SequenceEnqueuer(...)\r\n enqueuer.start()\r\n datas = enqueuer.get()\r\n for data in datas:\r\n # Use the inputs; training, evaluating, predicting.\r\n # ... stop sometime.\r\n enqueuer.close()\r\n ```\r\n\r\n The `enqueuer.get()` should be an infinite stream of datas.\r\n \"\"\"\r\n\r\n def __init__(self, sequence,\r\n use_multiprocessing=False):\r\n self.sequence = sequence\r\n self.use_multiprocessing = use_multiprocessing\r\n\r\n global _SEQUENCE_COUNTER\r\n if _SEQUENCE_COUNTER is None:\r\n try:\r\n _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)\r\n except OSError:\r\n # In this case the OS does not allow us to use\r\n # multiprocessing. We resort to an int\r\n # for enqueuer indexing.\r\n _SEQUENCE_COUNTER = 0\r\n\r\n if isinstance(_SEQUENCE_COUNTER, int):\r\n self.uid = _SEQUENCE_COUNTER\r\n _SEQUENCE_COUNTER += 1\r\n else:\r\n # Doing Multiprocessing.Value += x is not process-safe.\r\n with _SEQUENCE_COUNTER.get_lock():\r\n self.uid = _SEQUENCE_COUNTER.value\r\n _SEQUENCE_COUNTER.value += 1\r\n\r\n self.workers = 0\r\n self.executor_fn = None\r\n self.queue = None\r\n self.run_thread = None\r\n self.stop_signal = None\r\n\r\n def is_running(self):\r\n return self.stop_signal is not None and not self.stop_signal.is_set()\r\n\r\n def start(self, workers=1, max_queue_size=10):\r\n \"\"\"Starts the handler's workers.\r\n\r\n Arguments:\r\n workers: Number of workers.\r\n max_queue_size: queue size\r\n (when full, workers could block on `put()`)\r\n \"\"\"\r\n if self.use_multiprocessing:\r\n self.executor_fn = self._get_executor_init(workers)\r\n else:\r\n # We do not need the init since it's threads.\r\n self.executor_fn = lambda _: ThreadPool(workers)\r\n self.workers = workers\r\n self.queue = queue.Queue(max_queue_size)\r\n self.stop_signal = threading.Event()\r\n self.run_thread = threading.Thread(target=self._run)\r\n self.run_thread.daemon = True\r\n self.run_thread.start()\r\n\r\n def _send_sequence(self):\r\n \"\"\"Sends current Iterable to all workers.\"\"\"\r\n # For new processes that may spawn\r\n _SHARED_SEQUENCES[self.uid] = self.sequence\r\n\r\n def stop(self, timeout=None):\r\n \"\"\"Stops running threads and wait for them to exit, if necessary.\r\n\r\n Should be called by the same thread which called `start()`.\r\n\r\n Arguments:\r\n timeout: maximum time to wait on `thread.join()`\r\n \"\"\"\r\n self.stop_signal.set()\r\n with self.queue.mutex:\r\n self.queue.queue.clear()\r\n self.queue.unfinished_tasks = 0\r\n self.queue.not_full.notify()\r\n self.run_thread.join(timeout)\r\n _SHARED_SEQUENCES[self.uid] = None\r\n\r\n @abstractmethod\r\n def _run(self):\r\n \"\"\"Submits request to the executor and queue the `Future` objects.\"\"\"\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def _get_executor_init(self, workers):\r\n \"\"\"Gets the Pool initializer for multiprocessing.\r\n\r\n Arguments:\r\n workers: Number of workers.\r\n\r\n Returns:\r\n Function, a Function to initialize the pool\r\n \"\"\"\r\n raise 
NotImplementedError\r\n\r\n @abstractmethod\r\n def get(self):\r\n \"\"\"Creates a generator to extract data from the queue.\r\n\r\n Skip the data if it is `None`.\r\n # Returns\r\n Generator yielding tuples `(inputs, targets)`\r\n or `(inputs, targets, sample_weights)`.\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n\r\n@keras_export('keras.utils.OrderedEnqueuer')\r\nclass OrderedEnqueuer(SequenceEnqueuer):\r\n \"\"\"Builds a Enqueuer from a Sequence.\r\n\r\n Used in `fit_generator`, `evaluate_generator`, `predict_generator`.\r\n\r\n Arguments:\r\n sequence: A `tf.keras.utils.data_utils.Sequence` object.\r\n use_multiprocessing: use multiprocessing if True, otherwise threading\r\n shuffle: whether to shuffle the data at the beginning of each epoch\r\n \"\"\"\r\n\r\n def __init__(self, sequence, use_multiprocessing=False, shuffle=False):\r\n super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)\r\n self.shuffle = shuffle\r\n\r\n def _get_executor_init(self, workers):\r\n \"\"\"Gets the Pool initializer for multiprocessing.\r\n\r\n Arguments:\r\n workers: Number of workers.\r\n\r\n Returns:\r\n Function, a Function to initialize the pool\r\n \"\"\"\r\n def pool_fn(seqs):\r\n pool = multiprocessing.Pool(\r\n workers, initializer=init_pool_generator,\r\n initargs=(seqs, None, get_worker_id_queue()))\r\n _DATA_POOLS.add(pool)\r\n return pool\r\n\r\n return pool_fn\r\n\r\n def _wait_queue(self):\r\n \"\"\"Wait for the queue to be empty.\"\"\"\r\n while True:\r\n time.sleep(0.1)\r\n if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\r\n return\r\n\r\n def _run(self):\r\n \"\"\"Submits request to the executor and queue the `Future` objects.\"\"\"\r\n sequence = list(range(len(self.sequence)))\r\n self._send_sequence() # Share the initial sequence\r\n while True:\r\n if self.shuffle:\r\n random.shuffle(sequence)\r\n\r\n with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\r\n for i in sequence:\r\n if self.stop_signal.is_set():\r\n return\r\n\r\n self.queue.put(\r\n executor.apply_async(get_index, (self.uid, i)), block=True)\r\n\r\n # Done with the current epoch, waiting for the final batches\r\n self._wait_queue()\r\n\r\n if self.stop_signal.is_set():\r\n # We're done\r\n return\r\n\r\n # Call the internal on epoch end.\r\n self.sequence.on_epoch_end()\r\n self._send_sequence() # Update the pool\r\n\r\n def get(self):\r\n \"\"\"Creates a generator to extract data from the queue.\r\n\r\n Skip the data if it is `None`.\r\n\r\n Yields:\r\n The next element in the queue, i.e. a tuple\r\n `(inputs, targets)` or\r\n `(inputs, targets, sample_weights)`.\r\n \"\"\"\r\n try:\r\n while self.is_running():\r\n inputs = self.queue.get(block=True).get()\r\n self.queue.task_done()\r\n if inputs is not None:\r\n yield inputs\r\n except Exception: # pylint: disable=broad-except\r\n self.stop()\r\n six.reraise(*sys.exc_info())\r\n\r\n\r\ndef init_pool_generator(gens, random_seed=None, id_queue=None):\r\n \"\"\"Initializer function for pool workers.\r\n\r\n Args:\r\n gens: State which should be made available to worker processes.\r\n random_seed: An optional value with which to seed child processes.\r\n id_queue: A multiprocessing Queue of worker ids. 
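Tying `SequenceEnqueuer` and `OrderedEnqueuer` together: `start()` spins up the workers, `get()` yields batches in index order across epochs, and `stop()` tears the workers down. A sketch with the threading backend and a hypothetical toy `Sequence` (same shape as the earlier sketch):

```python
import numpy as np
import tensorflow as tf

class ToyBatches(tf.keras.utils.Sequence):
  def __len__(self):
    return 4

  def __getitem__(self, idx):
    return np.full((2, 3), idx, dtype=np.float32), np.array([idx, idx])

enqueuer = tf.keras.utils.OrderedEnqueuer(
    ToyBatches(), use_multiprocessing=False, shuffle=False)
enqueuer.start(workers=2, max_queue_size=4)
batches = enqueuer.get()      # infinite generator; loops epoch after epoch
for _ in range(len(ToyBatches())):
  x, y = next(batches)
enqueuer.stop()
```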
This is used to indicate\r\n that a worker process was created by Keras and can be terminated using\r\n the cleanup_all_keras_forkpools utility.\r\n \"\"\"\r\n global _SHARED_SEQUENCES\r\n _SHARED_SEQUENCES = gens\r\n\r\n worker_proc = multiprocessing.current_process()\r\n\r\n # name isn't used for anything, but setting a more descriptive name is helpful\r\n # when diagnosing orphaned processes.\r\n worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)\r\n\r\n if random_seed is not None:\r\n np.random.seed(random_seed + worker_proc.ident)\r\n\r\n if id_queue is not None:\r\n # If a worker dies during init, the pool will just create a replacement.\r\n id_queue.put(worker_proc.ident, block=True, timeout=0.1)\r\n\r\n\r\ndef next_sample(uid):\r\n \"\"\"Gets the next value from the generator `uid`.\r\n\r\n To allow multiple generators to be used at the same time, we use `uid` to\r\n get a specific one. A single generator would cause the validation to\r\n overwrite the training generator.\r\n\r\n Arguments:\r\n uid: int, generator identifier\r\n\r\n Returns:\r\n The next value of generator `uid`.\r\n \"\"\"\r\n return six.next(_SHARED_SEQUENCES[uid])\r\n\r\n\r\n@keras_export('keras.utils.GeneratorEnqueuer')\r\nclass GeneratorEnqueuer(SequenceEnqueuer):\r\n \"\"\"Builds a queue out of a data generator.\r\n\r\n The provided generator can be finite in which case the class will throw\r\n a `StopIteration` exception.\r\n\r\n Used in `fit_generator`, `evaluate_generator`, `predict_generator`.\r\n\r\n Arguments:\r\n generator: a generator function which yields data\r\n use_multiprocessing: use multiprocessing if True, otherwise threading\r\n wait_time: time to sleep in-between calls to `put()`\r\n random_seed: Initial seed for workers,\r\n will be incremented by one for each worker.\r\n \"\"\"\r\n\r\n def __init__(self, sequence,\r\n use_multiprocessing=False,\r\n random_seed=None):\r\n super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)\r\n self.random_seed = random_seed\r\n\r\n def _get_executor_init(self, workers):\r\n \"\"\"Gets the Pool initializer for multiprocessing.\r\n\r\n Arguments:\r\n workers: Number of works.\r\n\r\n Returns:\r\n A Function to initialize the pool\r\n \"\"\"\r\n def pool_fn(seqs):\r\n pool = multiprocessing.Pool(\r\n workers, initializer=init_pool_generator,\r\n initargs=(seqs, self.random_seed, get_worker_id_queue()))\r\n _DATA_POOLS.add(pool)\r\n return pool\r\n return pool_fn\r\n\r\n def _run(self):\r\n \"\"\"Submits request to the executor and queue the `Future` objects.\"\"\"\r\n self._send_sequence() # Share the initial generator\r\n with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\r\n while True:\r\n if self.stop_signal.is_set():\r\n return\r\n\r\n self.queue.put(\r\n executor.apply_async(next_sample, (self.uid,)), block=True)\r\n\r\n def get(self):\r\n \"\"\"Creates a generator to extract data from the queue.\r\n\r\n Skip the data if it is `None`.\r\n\r\n Yields:\r\n The next element in the queue, i.e. 
a tuple\r\n `(inputs, targets)` or\r\n `(inputs, targets, sample_weights)`.\r\n \"\"\"\r\n try:\r\n while self.is_running():\r\n inputs = self.queue.get(block=True).get()\r\n self.queue.task_done()\r\n if inputs is not None:\r\n yield inputs\r\n except StopIteration:\r\n # Special case for finite generators\r\n last_ones = []\r\n while self.queue.qsize() > 0:\r\n last_ones.append(self.queue.get(block=True))\r\n # Wait for them to complete\r\n for f in last_ones:\r\n f.wait()\r\n # Keep the good ones\r\n last_ones = [future.get() for future in last_ones if future.successful()]\r\n for inputs in last_ones:\r\n if inputs is not None:\r\n yield inputs\r\n except Exception as e: # pylint: disable=broad-except\r\n self.stop()\r\n if 'generator already executing' in str(e):\r\n raise RuntimeError(\r\n 'Your generator is NOT thread-safe. '\r\n 'Keras requires a thread-safe generator when '\r\n '`use_multiprocessing=False, workers > 1`. ')\r\n six.reraise(*sys.exc_info())\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for ragged.squeeze.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom absl.testing import parameterized\r\n\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops.ragged import ragged_conversion_ops\r\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\r\nfrom tensorflow.python.ops.ragged import ragged_squeeze_op\r\nfrom tensorflow.python.platform import googletest\r\n\r\n\r\n@test_util.run_all_in_graph_and_eager_modes\r\nclass RaggedSqueezeTest(test_util.TensorFlowTestCase,\r\n parameterized.TestCase):\r\n\r\n @parameterized.parameters([\r\n {\r\n 'input_list': []\r\n },\r\n {\r\n 'input_list': [[]],\r\n 'squeeze_ranks': [0]\r\n },\r\n {\r\n 'input_list': [[[[], []], [[], []]]],\r\n 'squeeze_ranks': [0]\r\n },\r\n ])\r\n def test_passing_empty(self, input_list, squeeze_ranks=None):\r\n rt = ragged_squeeze_op.squeeze(\r\n ragged_factory_ops.constant(input_list), squeeze_ranks)\r\n dt = array_ops.squeeze(constant_op.constant(input_list), squeeze_ranks)\r\n self.assertAllEqual(ragged_conversion_ops.to_tensor(rt), dt)\r\n\r\n @parameterized.parameters([\r\n {\r\n 'input_list': [[1]],\r\n 'squeeze_ranks': [0]\r\n },\r\n {\r\n 'input_list': [[1]],\r\n 'squeeze_ranks': [0, 1]\r\n },\r\n {\r\n 'input_list': [[1, 2]],\r\n 'squeeze_ranks': [0]\r\n },\r\n {\r\n 'input_list': [[1], [2]],\r\n 'squeeze_ranks': [1]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [1]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': 
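`GeneratorEnqueuer` gives a plain Python generator the same start/get/stop lifecycle; with `use_multiprocessing=False` and more than one worker the generator itself must be thread-safe, so this sketch keeps a single worker:

```python
import numpy as np
import tensorflow as tf

def batch_generator():
  """Infinite toy stream of (inputs, targets) pairs."""
  i = 0
  while True:
    yield np.full((2, 3), i, dtype=np.float32), np.array([i, i])
    i += 1

enqueuer = tf.keras.utils.GeneratorEnqueuer(
    batch_generator(), use_multiprocessing=False)
enqueuer.start(workers=1, max_queue_size=4)
batches = enqueuer.get()
x, y = next(batches)
enqueuer.stop()
```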
[3]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0, 3]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0, 1]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [1, 3]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0, 1, 3]\r\n },\r\n {\r\n 'input_list': [[[1], [2]], [[3], [4]]],\r\n 'squeeze_ranks': [2]\r\n },\r\n {\r\n 'input_list': [[1], [2]],\r\n 'squeeze_ranks': [-1]\r\n },\r\n ])\r\n def test_passing_simple(self, input_list, squeeze_ranks=None):\r\n rt = ragged_squeeze_op.squeeze(\r\n ragged_factory_ops.constant(input_list), squeeze_ranks)\r\n dt = array_ops.squeeze(constant_op.constant(input_list), squeeze_ranks)\r\n self.assertAllEqual(ragged_conversion_ops.to_tensor(rt), dt)\r\n\r\n @parameterized.parameters([\r\n # ragged_conversion_ops.from_tensor does not work for this\r\n # {'input_list': [1]},\r\n {\r\n 'input_list': [[1]],\r\n 'squeeze_ranks': [0]\r\n },\r\n {\r\n 'input_list': [[1, 2]],\r\n 'squeeze_ranks': [0]\r\n },\r\n {\r\n 'input_list': [[1], [2]],\r\n 'squeeze_ranks': [1]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [1]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [3]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0, 3]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0, 1]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [1, 3]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0, 1, 3]\r\n },\r\n {\r\n 'input_list': [[[1], [2]], [[3], [4]]],\r\n 'squeeze_ranks': [2]\r\n },\r\n ])\r\n def test_passing_simple_from_dense(self, input_list, squeeze_ranks=None):\r\n dt = constant_op.constant(input_list)\r\n rt = ragged_conversion_ops.from_tensor(dt)\r\n rt_s = ragged_squeeze_op.squeeze(rt, squeeze_ranks)\r\n dt_s = array_ops.squeeze(dt, squeeze_ranks)\r\n self.assertAllEqual(ragged_conversion_ops.to_tensor(rt_s), dt_s)\r\n\r\n @parameterized.parameters([\r\n {\r\n 'input_list': [[[[[[1]], [[1, 2]]]], [[[[]], [[]]]]]],\r\n 'output_list': [[[1], [1, 2]], [[], []]],\r\n 'squeeze_ranks': [0, 2, 4]\r\n },\r\n {\r\n 'input_list': [[[[[[1]], [[1, 2]]]], [[[[]], [[]]]]]],\r\n 'output_list': [[[[[1]], [[1, 2]]]], [[[[]], [[]]]]],\r\n 'squeeze_ranks': [0]\r\n },\r\n ])\r\n def test_passing_ragged(self, input_list, output_list, squeeze_ranks=None):\r\n rt = ragged_factory_ops.constant(input_list)\r\n rt_s = ragged_squeeze_op.squeeze(rt, squeeze_ranks)\r\n ref = ragged_factory_ops.constant(output_list)\r\n self.assertAllEqual(rt_s, ref)\r\n\r\n def test_passing_text(self):\r\n rt = ragged_factory_ops.constant([[[[[[[['H']], [['e']], [['l']], [['l']],\r\n [['o']]],\r\n [[['W']], [['o']], [['r']], [['l']],\r\n [['d']], [['!']]]]],\r\n [[[[['T']], [['h']], [['i']], [['s']]],\r\n [[['i']], [['s']]],\r\n [[['M']], [['e']], [['h']], [['r']],\r\n [['d']], [['a']], [['d']]],\r\n [[['.']]]]]]]])\r\n output_list = [[['H', 'e', 'l', 'l', 'o'], ['W', 'o', 'r', 'l', 'd', '!']],\r\n [['T', 'h', 'i', 's'], ['i', 's'],\r\n ['M', 'e', 'h', 'r', 'd', 'a', 'd'], ['.']]]\r\n ref = ragged_factory_ops.constant(output_list)\r\n rt_s = ragged_squeeze_op.squeeze(rt, [0, 1, 3, 6, 7])\r\n self.assertAllEqual(rt_s, ref)\r\n\r\n @parameterized.parameters([\r\n {\r\n 'input_list': [[]],\r\n 'squeeze_ranks': [1]\r\n },\r\n {\r\n 'input_list': [[1, 2]],\r\n 'squeeze_ranks': [1]\r\n },\r\n {\r\n 'input_list': [[1], 
[2]],\r\n 'squeeze_ranks': [0]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0, 2]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [2]\r\n },\r\n {\r\n 'input_list': [[[1], [2]], [[3], [4]]],\r\n 'squeeze_ranks': [0]\r\n },\r\n {\r\n 'input_list': [[[1], [2]], [[3], [4]]],\r\n 'squeeze_ranks': [1]\r\n },\r\n {\r\n 'input_list': [[], []],\r\n 'squeeze_ranks': [1]\r\n },\r\n {\r\n 'input_list': [[[], []], [[], []]],\r\n 'squeeze_ranks': [1]\r\n },\r\n ])\r\n def test_failing_InvalidArgumentError(self, input_list, squeeze_ranks):\r\n with self.assertRaises(errors.InvalidArgumentError):\r\n self.evaluate(\r\n ragged_squeeze_op.squeeze(\r\n ragged_factory_ops.constant(input_list), squeeze_ranks))\r\n\r\n @parameterized.parameters([\r\n {\r\n 'input_list': [[]]\r\n },\r\n {\r\n 'input_list': [[1]]\r\n },\r\n {\r\n 'input_list': [[1, 2]]\r\n },\r\n {\r\n 'input_list': [[[1], [2]], [[3], [4]]]\r\n },\r\n {\r\n 'input_list': [[1]]\r\n },\r\n {\r\n 'input_list': [[[1], [2]], [[3], [4]]]\r\n },\r\n {\r\n 'input_list': [[[[12], [11]]]]\r\n },\r\n ])\r\n def test_failing_no_squeeze_dim_specified(self, input_list):\r\n with self.assertRaises(ValueError):\r\n ragged_squeeze_op.squeeze(ragged_factory_ops.constant(input_list))\r\n\r\n @parameterized.parameters([\r\n {\r\n 'input_list': [[[[12], [11]]]],\r\n 'squeeze_ranks': [0, 1, 3]\r\n },\r\n ])\r\n def test_failing_axis_is_not_a_list(self, input_list, squeeze_ranks):\r\n with self.assertRaises(TypeError):\r\n tensor_ranks = constant_op.constant(squeeze_ranks)\r\n ragged_squeeze_op.squeeze(\r\n ragged_factory_ops.constant(input_list), tensor_ranks)\r\n\r\n\r\nif __name__ == '__main__':\r\n googletest.main()\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Unit tests for linear regression example under TensorFlow eager execution.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport glob\r\nimport os\r\nimport shutil\r\nimport tempfile\r\nimport time\r\n\r\nimport tensorflow as tf\r\n\r\nimport tensorflow.contrib.eager as tfe\r\nfrom tensorflow.contrib.eager.python.examples.linear_regression import linear_regression\r\n\r\n\r\ndef device():\r\n return \"/device:GPU:0\" if tfe.num_gpus() > 0 else \"/device:CPU:0\"\r\n\r\n\r\nclass LinearRegressionTest(tf.test.TestCase):\r\n\r\n def setUp(self):\r\n super(LinearRegressionTest, self).setUp()\r\n self._tmp_logdir = tempfile.mkdtemp()\r\n\r\n def tearDown(self):\r\n shutil.rmtree(self._tmp_logdir)\r\n super(LinearRegressionTest, self).tearDown()\r\n\r\n def testSyntheticDataset(self):\r\n true_w = tf.random_uniform([3, 1])\r\n true_b = [1.0]\r\n batch_size = 10\r\n num_batches = 2\r\n noise_level = 0.\r\n dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,\r\n batch_size, 
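In one line, the tests above check that squeezing size-1 uniform dimensions out of a `RaggedTensor` matches `tf.squeeze` on the dense equivalent. A sketch using the same internal modules the test imports (internal paths, so they may move between versions):

```python
import tensorflow as tf
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_squeeze_op

value = [[[[12], [11]]]]                          # dense shape [1, 1, 2, 1]
rt_s = ragged_squeeze_op.squeeze(ragged_factory_ops.constant(value), [0, 1, 3])
dt_s = tf.squeeze(tf.constant(value), [0, 1, 3])  # dense reference: [12, 11]
# test_passing_simple asserts that rt_s, converted to a dense tensor, equals dt_s.
```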
num_batches)\r\n\r\n it = tfe.Iterator(dataset)\r\n for _ in range(2):\r\n (xs, ys) = it.next()\r\n self.assertEqual((batch_size, 3), xs.shape)\r\n self.assertEqual((batch_size, 1), ys.shape)\r\n self.assertEqual(tf.float32, xs.dtype)\r\n self.assertEqual(tf.float32, ys.dtype)\r\n with self.assertRaises(StopIteration):\r\n it.next()\r\n\r\n def testLinearRegression(self):\r\n true_w = [[1.0], [-0.5], [2.0]]\r\n true_b = [1.0]\r\n\r\n model = linear_regression.LinearModel()\r\n dataset = linear_regression.synthetic_dataset(\r\n true_w, true_b, noise_level=0., batch_size=64, num_batches=40)\r\n\r\n with tf.device(device()):\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\r\n linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)\r\n\r\n self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)\r\n self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)\r\n self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, \"events.out.*\")))\r\n\r\n\r\nclass EagerLinearRegressionBenchmark(tf.test.Benchmark):\r\n\r\n def benchmarkEagerLinearRegression(self):\r\n num_epochs = 10\r\n num_batches = 200\r\n batch_size = 64\r\n dataset = linear_regression.synthetic_dataset(\r\n w=tf.random_uniform([3, 1]),\r\n b=tf.random_uniform([1]),\r\n noise_level=0.01,\r\n batch_size=batch_size,\r\n num_batches=num_batches)\r\n burn_in_dataset = dataset.take(10)\r\n\r\n model = linear_regression.LinearModel()\r\n\r\n with tf.device(device()):\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\r\n\r\n # Perform burn-in.\r\n linear_regression.fit(model, burn_in_dataset, optimizer)\r\n\r\n start_time = time.time()\r\n for _ in range(num_epochs):\r\n linear_regression.fit(model, dataset, optimizer)\r\n wall_time = time.time() - start_time\r\n\r\n examples_per_sec = num_epochs * num_batches * batch_size / wall_time\r\n self.report_benchmark(\r\n name=\"eager_train_%s\" %\r\n (\"gpu\" if tfe.num_gpus() > 0 else \"cpu\"),\r\n iters=num_epochs * num_batches,\r\n extras={\"examples_per_sec\": examples_per_sec},\r\n wall_time=wall_time)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tf.enable_eager_execution()\r\n tf.test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. 
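Stripped of the test harness, `testLinearRegression` above builds a synthetic dataset, fits `LinearModel` with SGD under eager execution, and expects the learned weights to approach `true_w`/`true_b`. A condensed sketch reusing the same contrib example module (TF 1.x only, where `tf.contrib` still exists):

```python
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression

tf.enable_eager_execution()

true_w, true_b = [[1.0], [-0.5], [2.0]], [1.0]
dataset = linear_regression.synthetic_dataset(
    true_w, true_b, noise_level=0., batch_size=64, num_batches=40)

model = linear_regression.LinearModel()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
linear_regression.fit(model, dataset, optimizer)

print(model.variables[0].numpy())   # close to true_w
print(model.variables[1].numpy())   # close to true_b
```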
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for the UniqueDataset serialization.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base\r\nfrom tensorflow.python.data.experimental.ops import unique\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\nclass UniqueDatasetSerializationTest(\r\n dataset_serialization_test_base.DatasetSerializationTestBase):\r\n\r\n def testUnique(self):\r\n\r\n def build_dataset(num_elements, unique_elem_range):\r\n return dataset_ops.Dataset.range(num_elements).map(\r\n lambda x: x % unique_elem_range).apply(unique.unique())\r\n\r\n self.run_core_tests(lambda: build_dataset(200, 100), 100)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Large tests for metric_ops.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom six.moves import xrange # pylint: disable=redefined-builtin\r\nfrom tensorflow.contrib.metrics.python.ops import metric_ops\r\nfrom tensorflow.python.framework import dtypes as dtypes_lib\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import random_ops\r\nfrom tensorflow.python.ops import variables\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\nclass StreamingPrecisionRecallAtEqualThresholdsLargeTest(test.TestCase):\r\n\r\n def setUp(self):\r\n np.random.seed(1)\r\n ops.reset_default_graph()\r\n\r\n def testLargeCase(self):\r\n shape = [32, 512, 256, 1]\r\n predictions = random_ops.random_uniform(\r\n shape, 0.0, 1.0, dtype=dtypes_lib.float32)\r\n labels = math_ops.greater(random_ops.random_uniform(shape, 0.0, 1.0), 0.5)\r\n\r\n result, update_op = metric_ops.precision_recall_at_equal_thresholds(\r\n labels=labels, predictions=predictions, num_thresholds=201)\r\n # Run many updates, enough to cause highly inaccurate values if the\r\n # 
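The serialization test rebuilds a short pipeline: a range of 200 values folded modulo 100 and then deduplicated. The same pipeline written against the public `tf.data` surface of the versions this file targets:

```python
import tensorflow as tf

# 200 inputs reduced modulo 100, then deduplicated -> 100 distinct elements.
dataset = (tf.data.Dataset.range(200)
           .map(lambda x: x % 100)
           .apply(tf.data.experimental.unique()))
```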
code used float32 for accumulation.\r\n num_updates = 71\r\n\r\n with self.cached_session() as sess:\r\n sess.run(variables.local_variables_initializer())\r\n for _ in xrange(num_updates):\r\n sess.run(update_op)\r\n\r\n prdata = sess.run(result)\r\n\r\n # Since we use random values, we won't know the tp/fp/tn/fn values, but\r\n # tp and fp at threshold 0 should be the total number of positive and\r\n # negative labels, hence their sum should be total number of pixels.\r\n expected_value = 1.0 * np.product(shape) * num_updates\r\n got_value = prdata.tp[0] + prdata.fp[0]\r\n # They should be at least within 1.\r\n self.assertNear(got_value, expected_value, 1.0)\r\n\r\nif __name__ == '__main__':\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Python wrappers for reader Datasets.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.data.util import convert\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.framework import tensor_spec\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_dataset_ops\r\nfrom tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n# TODO(b/64974358): Increase default buffer size to 256 MB.\r\n_DEFAULT_READER_BUFFER_SIZE_BYTES = 256 * 1024 # 256 KB\r\n\r\n\r\ndef _create_or_validate_filenames_dataset(filenames):\r\n \"\"\"Creates (or validates) a dataset of filenames.\r\n\r\n Args:\r\n filenames: Either a list or dataset of filenames. If it is a list, it is\r\n convert to a dataset. 
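The large test above relies on one invariant: at threshold 0 every pixel counts as predicted positive, so `tp[0] + fp[0]` must equal the number of pixels processed. A smaller TF 1.x sketch of the same check, with the shape shrunk from `[32, 512, 256, 1]`:

```python
import numpy as np
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import metric_ops

shape = [4, 8, 8, 1]
predictions = tf.random_uniform(shape, 0.0, 1.0, dtype=tf.float32)
labels = tf.greater(tf.random_uniform(shape, 0.0, 1.0), 0.5)

result, update_op = metric_ops.precision_recall_at_equal_thresholds(
    labels=labels, predictions=predictions, num_thresholds=201)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  prdata = sess.run(result)
  print(prdata.tp[0] + prdata.fp[0], np.prod(shape))   # the two should match
```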
If it is a dataset, its type and shape is validated.\r\n\r\n Returns:\r\n A dataset of filenames.\r\n \"\"\"\r\n if isinstance(filenames, dataset_ops.DatasetV2):\r\n if dataset_ops.get_legacy_output_types(filenames) != dtypes.string:\r\n raise TypeError(\r\n \"`filenames` must be a `tf.data.Dataset` of `tf.string` elements.\")\r\n if not dataset_ops.get_legacy_output_shapes(filenames).is_compatible_with(\r\n tensor_shape.TensorShape([])):\r\n raise TypeError(\r\n \"`filenames` must be a `tf.data.Dataset` of scalar `tf.string` \"\r\n \"elements.\")\r\n else:\r\n filenames = ops.convert_to_tensor(filenames, dtype=dtypes.string)\r\n filenames = array_ops.reshape(filenames, [-1], name=\"flat_filenames\")\r\n filenames = dataset_ops.DatasetV2.from_tensor_slices(filenames)\r\n\r\n return filenames\r\n\r\n\r\ndef _create_dataset_reader(dataset_creator, filenames, num_parallel_reads=None):\r\n \"\"\"Creates a dataset that reads the given files using the given reader.\r\n\r\n Args:\r\n dataset_creator: A function that takes in a single file name and returns a\r\n dataset.\r\n filenames: A `tf.data.Dataset` containing one or more filenames.\r\n num_parallel_reads: The number of parallel reads we should do.\r\n\r\n Returns:\r\n A `Dataset` that reads data from `filenames`.\r\n \"\"\"\r\n def read_one_file(filename):\r\n filename = ops.convert_to_tensor(filename, dtypes.string, name=\"filename\")\r\n return dataset_creator(filename)\r\n\r\n if num_parallel_reads is None:\r\n return filenames.flat_map(read_one_file)\r\n else:\r\n return ParallelInterleaveDataset(\r\n filenames, read_one_file, cycle_length=num_parallel_reads,\r\n block_length=1, sloppy=False, buffer_output_elements=None,\r\n prefetch_input_elements=None)\r\n\r\n\r\nclass _TextLineDataset(dataset_ops.DatasetSource):\r\n \"\"\"A `Dataset` comprising records from one or more text files.\"\"\"\r\n\r\n def __init__(self, filenames, compression_type=None, buffer_size=None):\r\n \"\"\"Creates a `TextLineDataset`.\r\n\r\n Args:\r\n filenames: A `tf.string` tensor containing one or more filenames.\r\n compression_type: (Optional.) A `tf.string` scalar evaluating to one of\r\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\r\n buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes\r\n to buffer. A value of 0 results in the default buffering values chosen\r\n based on the compression type.\r\n \"\"\"\r\n self._filenames = filenames\r\n self._compression_type = convert.optional_param_to_tensor(\r\n \"compression_type\",\r\n compression_type,\r\n argument_default=\"\",\r\n argument_dtype=dtypes.string)\r\n self._buffer_size = convert.optional_param_to_tensor(\r\n \"buffer_size\",\r\n buffer_size,\r\n argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)\r\n variant_tensor = gen_dataset_ops.text_line_dataset(\r\n self._filenames, self._compression_type, self._buffer_size)\r\n super(_TextLineDataset, self).__init__(variant_tensor)\r\n\r\n @property\r\n def element_spec(self):\r\n return tensor_spec.TensorSpec([], dtypes.string)\r\n\r\n\r\n@tf_export(\"data.TextLineDataset\", v1=[])\r\nclass TextLineDatasetV2(dataset_ops.DatasetSource):\r\n \"\"\"A `Dataset` comprising lines from one or more text files.\"\"\"\r\n\r\n def __init__(self, filenames, compression_type=None, buffer_size=None,\r\n num_parallel_reads=None):\r\n \"\"\"Creates a `TextLineDataset`.\r\n\r\n Args:\r\n filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or\r\n more filenames.\r\n compression_type: (Optional.) 
A `tf.string` scalar evaluating to one of\r\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\r\n buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes\r\n to buffer. A value of 0 results in the default buffering values chosen\r\n based on the compression type.\r\n num_parallel_reads: (Optional.) A `tf.int64` scalar representing the\r\n number of files to read in parallel. If greater than one, the records of\r\n files read in parallel are outputted in an interleaved order. If your\r\n input pipeline is I/O bottlenecked, consider setting this parameter to a\r\n value greater than one to parallelize the I/O. If `None`, files will be\r\n read sequentially.\r\n \"\"\"\r\n filenames = _create_or_validate_filenames_dataset(filenames)\r\n self._filenames = filenames\r\n self._compression_type = compression_type\r\n self._buffer_size = buffer_size\r\n\r\n def creator_fn(filename):\r\n return _TextLineDataset(filename, compression_type, buffer_size)\r\n\r\n self._impl = _create_dataset_reader(creator_fn, filenames,\r\n num_parallel_reads)\r\n variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access\r\n\r\n super(TextLineDatasetV2, self).__init__(variant_tensor)\r\n\r\n @property\r\n def element_spec(self):\r\n return tensor_spec.TensorSpec([], dtypes.string)\r\n\r\n\r\n@tf_export(v1=[\"data.TextLineDataset\"])\r\nclass TextLineDatasetV1(dataset_ops.DatasetV1Adapter):\r\n \"\"\"A `Dataset` comprising lines from one or more text files.\"\"\"\r\n\r\n def __init__(self, filenames, compression_type=None, buffer_size=None,\r\n num_parallel_reads=None):\r\n wrapped = TextLineDatasetV2(filenames, compression_type, buffer_size,\r\n num_parallel_reads)\r\n super(TextLineDatasetV1, self).__init__(wrapped)\r\n __init__.__doc__ = TextLineDatasetV2.__init__.__doc__\r\n\r\n @property\r\n def _filenames(self):\r\n return self._dataset._filenames # pylint: disable=protected-access\r\n\r\n @_filenames.setter\r\n def _filenames(self, value):\r\n self._dataset._filenames = value # pylint: disable=protected-access\r\n\r\n\r\nclass _TFRecordDataset(dataset_ops.DatasetSource):\r\n \"\"\"A `Dataset` comprising records from one or more TFRecord files.\"\"\"\r\n\r\n def __init__(self, filenames, compression_type=None, buffer_size=None):\r\n \"\"\"Creates a `TFRecordDataset`.\r\n\r\n Args:\r\n filenames: A `tf.string` tensor containing one or more filenames.\r\n compression_type: (Optional.) A `tf.string` scalar evaluating to one of\r\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\r\n buffer_size: (Optional.) A `tf.int64` scalar representing the number of\r\n bytes in the read buffer. 
0 means no buffering.\r\n \"\"\"\r\n self._filenames = filenames\r\n self._compression_type = convert.optional_param_to_tensor(\r\n \"compression_type\",\r\n compression_type,\r\n argument_default=\"\",\r\n argument_dtype=dtypes.string)\r\n self._buffer_size = convert.optional_param_to_tensor(\r\n \"buffer_size\",\r\n buffer_size,\r\n argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)\r\n variant_tensor = gen_dataset_ops.tf_record_dataset(\r\n self._filenames, self._compression_type, self._buffer_size)\r\n super(_TFRecordDataset, self).__init__(variant_tensor)\r\n\r\n @property\r\n def element_spec(self):\r\n return tensor_spec.TensorSpec([], dtypes.string)\r\n\r\n\r\nclass ParallelInterleaveDataset(dataset_ops.UnaryDataset):\r\n \"\"\"A `Dataset` that maps a function over its input and flattens the result.\"\"\"\r\n\r\n def __init__(self, input_dataset, map_func, cycle_length, block_length,\r\n sloppy, buffer_output_elements, prefetch_input_elements):\r\n \"\"\"See `tf.data.experimental.parallel_interleave()` for details.\"\"\"\r\n self._input_dataset = input_dataset\r\n self._map_func = dataset_ops.StructuredFunctionWrapper(\r\n map_func, self._transformation_name(), dataset=input_dataset)\r\n if not isinstance(self._map_func.output_structure, dataset_ops.DatasetSpec):\r\n raise TypeError(\"`map_func` must return a `Dataset` object.\")\r\n self._element_spec = self._map_func.output_structure._element_spec # pylint: disable=protected-access\r\n self._cycle_length = ops.convert_to_tensor(\r\n cycle_length, dtype=dtypes.int64, name=\"cycle_length\")\r\n self._block_length = ops.convert_to_tensor(\r\n block_length, dtype=dtypes.int64, name=\"block_length\")\r\n self._sloppy = ops.convert_to_tensor(\r\n sloppy, dtype=dtypes.bool, name=\"sloppy\")\r\n self._buffer_output_elements = convert.optional_param_to_tensor(\r\n \"buffer_output_elements\",\r\n buffer_output_elements,\r\n argument_default=2 * block_length)\r\n self._prefetch_input_elements = convert.optional_param_to_tensor(\r\n \"prefetch_input_elements\",\r\n prefetch_input_elements,\r\n argument_default=2 * cycle_length)\r\n variant_tensor = ged_ops.parallel_interleave_dataset(\r\n self._input_dataset._variant_tensor, # pylint: disable=protected-access\r\n self._map_func.function.captured_inputs,\r\n self._cycle_length,\r\n self._block_length,\r\n self._sloppy,\r\n self._buffer_output_elements,\r\n self._prefetch_input_elements,\r\n f=self._map_func.function,\r\n **self._flat_structure)\r\n super(ParallelInterleaveDataset, self).__init__(input_dataset,\r\n variant_tensor)\r\n\r\n def _functions(self):\r\n return [self._map_func]\r\n\r\n @property\r\n def element_spec(self):\r\n return self._element_spec\r\n\r\n def _transformation_name(self):\r\n return \"tf.data.experimental.parallel_interleave()\"\r\n\r\n\r\n@tf_export(\"data.TFRecordDataset\", v1=[])\r\nclass TFRecordDatasetV2(dataset_ops.DatasetV2):\r\n \"\"\"A `Dataset` comprising records from one or more TFRecord files.\"\"\"\r\n\r\n def __init__(self, filenames, compression_type=None, buffer_size=None,\r\n num_parallel_reads=None):\r\n \"\"\"Creates a `TFRecordDataset` to read one or more TFRecord files.\r\n\r\n Args:\r\n filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or\r\n more filenames.\r\n compression_type: (Optional.) A `tf.string` scalar evaluating to one of\r\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\r\n buffer_size: (Optional.) A `tf.int64` scalar representing the number of\r\n bytes in the read buffer. 
If your input pipeline is I/O bottlenecked,\r\n consider setting this parameter to a value 1-100 MBs. If `None`, a\r\n sensible default for both local and remote file systems is used.\r\n num_parallel_reads: (Optional.) A `tf.int64` scalar representing the\r\n number of files to read in parallel. If greater than one, the records of\r\n files read in parallel are outputted in an interleaved order. If your\r\n input pipeline is I/O bottlenecked, consider setting this parameter to a\r\n value greater than one to parallelize the I/O. If `None`, files will be\r\n read sequentially.\r\n\r\n Raises:\r\n TypeError: If any argument does not have the expected type.\r\n ValueError: If any argument does not have the expected shape.\r\n \"\"\"\r\n filenames = _create_or_validate_filenames_dataset(filenames)\r\n\r\n self._filenames = filenames\r\n self._compression_type = compression_type\r\n self._buffer_size = buffer_size\r\n self._num_parallel_reads = num_parallel_reads\r\n\r\n def creator_fn(filename):\r\n return _TFRecordDataset(filename, compression_type, buffer_size)\r\n\r\n self._impl = _create_dataset_reader(creator_fn, filenames,\r\n num_parallel_reads)\r\n variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access\r\n super(TFRecordDatasetV2, self).__init__(variant_tensor)\r\n\r\n def _clone(self,\r\n filenames=None,\r\n compression_type=None,\r\n buffer_size=None,\r\n num_parallel_reads=None):\r\n return TFRecordDatasetV2(filenames or self._filenames,\r\n compression_type or self._compression_type,\r\n buffer_size or self._buffer_size,\r\n num_parallel_reads or self._num_parallel_reads)\r\n\r\n def _inputs(self):\r\n return self._impl._inputs() # pylint: disable=protected-access\r\n\r\n @property\r\n def element_spec(self):\r\n return tensor_spec.TensorSpec([], dtypes.string)\r\n\r\n\r\n@tf_export(v1=[\"data.TFRecordDataset\"])\r\nclass TFRecordDatasetV1(dataset_ops.DatasetV1Adapter):\r\n \"\"\"A `Dataset` comprising records from one or more TFRecord files.\"\"\"\r\n\r\n def __init__(self, filenames, compression_type=None, buffer_size=None,\r\n num_parallel_reads=None):\r\n wrapped = TFRecordDatasetV2(\r\n filenames, compression_type, buffer_size, num_parallel_reads)\r\n super(TFRecordDatasetV1, self).__init__(wrapped)\r\n __init__.__doc__ = TFRecordDatasetV2.__init__.__doc__\r\n\r\n def _clone(self,\r\n filenames=None,\r\n compression_type=None,\r\n buffer_size=None,\r\n num_parallel_reads=None):\r\n # pylint: disable=protected-access\r\n return TFRecordDatasetV1(\r\n filenames or self._dataset._filenames,\r\n compression_type or self._dataset._compression_type,\r\n buffer_size or self._dataset._buffer_size,\r\n num_parallel_reads or self._dataset._num_parallel_reads)\r\n\r\n @property\r\n def _filenames(self):\r\n return self._dataset._filenames # pylint: disable=protected-access\r\n\r\n @_filenames.setter\r\n def _filenames(self, value):\r\n self._dataset._filenames = value # pylint: disable=protected-access\r\n\r\n\r\nclass _FixedLengthRecordDataset(dataset_ops.DatasetSource):\r\n \"\"\"A `Dataset` of fixed-length records from one or more binary files.\"\"\"\r\n\r\n def __init__(self,\r\n filenames,\r\n record_bytes,\r\n header_bytes=None,\r\n footer_bytes=None,\r\n buffer_size=None,\r\n compression_type=None):\r\n \"\"\"Creates a `FixedLengthRecordDataset`.\r\n\r\n Args:\r\n filenames: A `tf.string` tensor containing one or more filenames.\r\n record_bytes: A `tf.int64` scalar representing the number of bytes in\r\n each record.\r\n header_bytes: (Optional.) 
A `tf.int64` scalar representing the number of\r\n bytes to skip at the start of a file.\r\n footer_bytes: (Optional.) A `tf.int64` scalar representing the number of\r\n bytes to ignore at the end of a file.\r\n buffer_size: (Optional.) A `tf.int64` scalar representing the number of\r\n bytes to buffer when reading.\r\n compression_type: (Optional.) A `tf.string` scalar evaluating to one of\r\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\r\n \"\"\"\r\n self._filenames = filenames\r\n self._record_bytes = ops.convert_to_tensor(\r\n record_bytes, dtype=dtypes.int64, name=\"record_bytes\")\r\n self._header_bytes = convert.optional_param_to_tensor(\r\n \"header_bytes\", header_bytes)\r\n self._footer_bytes = convert.optional_param_to_tensor(\r\n \"footer_bytes\", footer_bytes)\r\n self._buffer_size = convert.optional_param_to_tensor(\r\n \"buffer_size\", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)\r\n self._compression_type = convert.optional_param_to_tensor(\r\n \"compression_type\",\r\n compression_type,\r\n argument_default=\"\",\r\n argument_dtype=dtypes.string)\r\n variant_tensor = gen_dataset_ops.fixed_length_record_dataset_v2(\r\n self._filenames, self._header_bytes, self._record_bytes,\r\n self._footer_bytes, self._buffer_size, self._compression_type)\r\n super(_FixedLengthRecordDataset, self).__init__(variant_tensor)\r\n\r\n @property\r\n def element_spec(self):\r\n return tensor_spec.TensorSpec([], dtypes.string)\r\n\r\n\r\n@tf_export(\"data.FixedLengthRecordDataset\", v1=[])\r\nclass FixedLengthRecordDatasetV2(dataset_ops.DatasetSource):\r\n \"\"\"A `Dataset` of fixed-length records from one or more binary files.\"\"\"\r\n\r\n def __init__(self,\r\n filenames,\r\n record_bytes,\r\n header_bytes=None,\r\n footer_bytes=None,\r\n buffer_size=None,\r\n compression_type=None,\r\n num_parallel_reads=None):\r\n \"\"\"Creates a `FixedLengthRecordDataset`.\r\n\r\n Args:\r\n filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or\r\n more filenames.\r\n record_bytes: A `tf.int64` scalar representing the number of bytes in\r\n each record.\r\n header_bytes: (Optional.) A `tf.int64` scalar representing the number of\r\n bytes to skip at the start of a file.\r\n footer_bytes: (Optional.) A `tf.int64` scalar representing the number of\r\n bytes to ignore at the end of a file.\r\n buffer_size: (Optional.) A `tf.int64` scalar representing the number of\r\n bytes to buffer when reading.\r\n compression_type: (Optional.) A `tf.string` scalar evaluating to one of\r\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\r\n num_parallel_reads: (Optional.) A `tf.int64` scalar representing the\r\n number of files to read in parallel. If greater than one, the records of\r\n files read in parallel are outputted in an interleaved order. If your\r\n input pipeline is I/O bottlenecked, consider setting this parameter to a\r\n value greater than one to parallelize the I/O. 
If `None`, files will be\r\n read sequentially.\r\n \"\"\"\r\n filenames = _create_or_validate_filenames_dataset(filenames)\r\n\r\n self._filenames = filenames\r\n self._record_bytes = record_bytes\r\n self._header_bytes = header_bytes\r\n self._footer_bytes = footer_bytes\r\n self._buffer_size = buffer_size\r\n self._compression_type = compression_type\r\n\r\n def creator_fn(filename):\r\n return _FixedLengthRecordDataset(filename, record_bytes, header_bytes,\r\n footer_bytes, buffer_size,\r\n compression_type)\r\n\r\n self._impl = _create_dataset_reader(creator_fn, filenames,\r\n num_parallel_reads)\r\n variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access\r\n super(FixedLengthRecordDatasetV2, self).__init__(variant_tensor)\r\n\r\n @property\r\n def element_spec(self):\r\n return tensor_spec.TensorSpec([], dtypes.string)\r\n\r\n\r\n@tf_export(v1=[\"data.FixedLengthRecordDataset\"])\r\nclass FixedLengthRecordDatasetV1(dataset_ops.DatasetV1Adapter):\r\n \"\"\"A `Dataset` of fixed-length records from one or more binary files.\"\"\"\r\n\r\n def __init__(self,\r\n filenames,\r\n record_bytes,\r\n header_bytes=None,\r\n footer_bytes=None,\r\n buffer_size=None,\r\n compression_type=None,\r\n num_parallel_reads=None):\r\n wrapped = FixedLengthRecordDatasetV2(\r\n filenames, record_bytes, header_bytes, footer_bytes, buffer_size,\r\n compression_type, num_parallel_reads)\r\n super(FixedLengthRecordDatasetV1, self).__init__(wrapped)\r\n __init__.__doc__ = FixedLengthRecordDatasetV2.__init__.__doc__\r\n\r\n @property\r\n def _filenames(self):\r\n return self._dataset._filenames # pylint: disable=protected-access\r\n\r\n @_filenames.setter\r\n def _filenames(self, value):\r\n self._dataset._filenames = value # pylint: disable=protected-access\r\n\r\n\r\n# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep\r\n# these aliases in place.\r\nFixedLengthRecordDataset = FixedLengthRecordDatasetV1\r\nTFRecordDataset = TFRecordDatasetV1\r\nTextLineDataset = TextLineDatasetV1\r\n", "# Copyright 2017 The TensorFlow Authors. 
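# A minimal usage sketch of the reader datasets defined above (TFRecordDataset
# and FixedLengthRecordDataset). The file names and record layout below are
# hypothetical placeholders, not values taken from this module.
import tensorflow as tf

# TFRecordDataset yields one tf.string element per serialized record. Reading
# several files in parallel interleaves their records, as the docstring notes.
records = tf.data.TFRecordDataset(
    ["data-00000.tfrecord", "data-00001.tfrecord"],  # hypothetical paths
    compression_type="GZIP",
    buffer_size=8 * 1024 * 1024,  # 8 MB read buffer
    num_parallel_reads=2)

# FixedLengthRecordDataset slices a binary file into equally sized records,
# optionally skipping a header and footer. Here each record is assumed to be
# a 28x28 uint8 image (784 bytes), as in MNIST-style binary dumps.
images = tf.data.FixedLengthRecordDataset(
    ["images.bin"], record_bytes=28 * 28, header_bytes=16)
images = images.map(lambda rec: tf.io.decode_raw(rec, tf.uint8))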
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for tf.GrpcServer.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.client import session\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import variables\r\nfrom tensorflow.python.platform import test\r\nfrom tensorflow.python.training import server_lib\r\n\r\n\r\nclass SameVariablesNoClearTest(test.TestCase):\r\n\r\n # Verifies behavior of multiple variables with multiple sessions connecting to\r\n # the same server.\r\n # TODO(b/34465411): Starting multiple servers with different configurations\r\n # in the same test is flaky. Move this test case back into\r\n # \"server_lib_test.py\" when this is no longer the case.\r\n @test_util.run_v1_only(\"b/120545219\")\r\n def testSameVariablesNoClear(self):\r\n server = server_lib.Server.create_local_server()\r\n\r\n with session.Session(server.target) as sess_1:\r\n v0 = variables.VariableV1([[2, 1]], name=\"v0\")\r\n v1 = variables.VariableV1([[1], [2]], name=\"v1\")\r\n v2 = math_ops.matmul(v0, v1)\r\n sess_1.run([v0.initializer, v1.initializer])\r\n self.assertAllEqual([[4]], sess_1.run(v2))\r\n\r\n with session.Session(server.target) as sess_2:\r\n new_v0 = ops.get_default_graph().get_tensor_by_name(\"v0:0\")\r\n new_v1 = ops.get_default_graph().get_tensor_by_name(\"v1:0\")\r\n new_v2 = math_ops.matmul(new_v0, new_v1)\r\n self.assertAllEqual([[4]], sess_2.run(new_v2))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for Permute bijector.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.permute import Permute\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\nclass PermuteBijectorTest(test.TestCase):\r\n \"\"\"Tests correctness of the Permute bijector.\"\"\"\r\n\r\n def setUp(self):\r\n self._rng = np.random.RandomState(42)\r\n\r\n def testBijector(self):\r\n expected_permutation = np.int32([2, 0, 1])\r\n expected_x = np.random.randn(4, 2, 3)\r\n expected_y = expected_x[..., expected_permutation]\r\n\r\n with self.cached_session() as sess:\r\n permutation_ph = array_ops.placeholder(dtype=dtypes.int32)\r\n bijector = Permute(\r\n permutation=permutation_ph,\r\n validate_args=True)\r\n [\r\n permutation_,\r\n x_,\r\n y_,\r\n fldj,\r\n ildj,\r\n ] = sess.run([\r\n bijector.permutation,\r\n bijector.inverse(expected_y),\r\n bijector.forward(expected_x),\r\n bijector.forward_log_det_jacobian(expected_x, event_ndims=1),\r\n bijector.inverse_log_det_jacobian(expected_y, event_ndims=1),\r\n ], feed_dict={permutation_ph: expected_permutation})\r\n self.assertEqual(\"permute\", bijector.name)\r\n self.assertAllEqual(expected_permutation, permutation_)\r\n self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)\r\n self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)\r\n self.assertAllClose(0., fldj, rtol=1e-6, atol=0)\r\n self.assertAllClose(0., ildj, rtol=1e-6, atol=0)\r\n\r\n def testRaisesOpError(self):\r\n with self.cached_session() as sess:\r\n with self.assertRaisesOpError(\"Permutation over `d` must contain\"):\r\n permutation_ph = array_ops.placeholder(dtype=dtypes.int32)\r\n bijector = Permute(\r\n permutation=permutation_ph,\r\n validate_args=True)\r\n sess.run(bijector.inverse([1.]),\r\n feed_dict={permutation_ph: [1, 2]})\r\n\r\n def testBijectiveAndFinite(self):\r\n permutation = np.int32([2, 0, 1])\r\n x = np.random.randn(4, 2, 3)\r\n y = x[..., permutation]\r\n with self.cached_session():\r\n bijector = Permute(permutation=permutation, validate_args=True)\r\n assert_bijective_and_finite(\r\n bijector, x, y, event_ndims=1, rtol=1e-6, atol=0)\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. 
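# A minimal sketch of using the Permute bijector directly, mirroring what the
# test above asserts. The input array is a hypothetical example.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops.bijectors.permute import Permute

x = np.random.randn(4, 2, 3).astype(np.float32)
bijector = Permute(permutation=[2, 0, 1], validate_args=True)

y = bijector.forward(x)        # same reordering as x[..., [2, 0, 1]]
x_back = bijector.inverse(y)   # recovers x
# A permutation only reorders coordinates, so |det J| = 1 and the
# log-det-Jacobian is 0 for every event.
ldj = bijector.forward_log_det_jacobian(x, event_ndims=1)

with tf.Session() as sess:
  y_, x_back_, ldj_ = sess.run([y, x_back, ldj])
  np.testing.assert_allclose(x_back_, x, rtol=1e-6)
  np.testing.assert_allclose(ldj_, 0., atol=1e-6)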
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Weighted Alternating Least Squares (WALS) on the tf.learn API.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.factorization.python.ops import factorization_ops\r\nfrom tensorflow.contrib.learn.python.learn.estimators import estimator\r\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import state_ops\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.summary import summary\r\nfrom tensorflow.python.training import session_run_hook\r\nfrom tensorflow.python.training import training_util\r\n\r\n\r\nclass _SweepHook(session_run_hook.SessionRunHook):\r\n \"\"\"Keeps track of row/col sweeps, and runs prep ops before each sweep.\"\"\"\r\n\r\n def __init__(self, is_row_sweep_var, is_sweep_done_var, init_op,\r\n row_prep_ops, col_prep_ops, row_train_op, col_train_op,\r\n switch_op):\r\n \"\"\"Initializes SweepHook.\r\n\r\n Args:\r\n is_row_sweep_var: A Boolean tf.Variable, determines whether we are\r\n currently doing a row or column sweep. It is updated by the hook.\r\n is_sweep_done_var: A Boolean tf.Variable, determines whether we are\r\n starting a new sweep (this is used to determine when to run the prep ops\r\n below).\r\n init_op: op to be run once before training. 
This is typically a local\r\n initialization op (such as cache initialization).\r\n row_prep_ops: A list of TensorFlow ops, to be run before the beginning of\r\n each row sweep (and during initialization), in the given order.\r\n col_prep_ops: A list of TensorFlow ops, to be run before the beginning of\r\n each column sweep (and during initialization), in the given order.\r\n row_train_op: A TensorFlow op to be run during row sweeps.\r\n col_train_op: A TensorFlow op to be run during column sweeps.\r\n switch_op: A TensorFlow op to be run before each sweep.\r\n \"\"\"\r\n self._is_row_sweep_var = is_row_sweep_var\r\n self._is_sweep_done_var = is_sweep_done_var\r\n self._init_op = init_op\r\n self._row_prep_ops = row_prep_ops\r\n self._col_prep_ops = col_prep_ops\r\n self._row_train_op = row_train_op\r\n self._col_train_op = col_train_op\r\n self._switch_op = switch_op\r\n # Boolean variable that determines whether the init_op has been run.\r\n self._is_initialized = False\r\n\r\n def before_run(self, run_context):\r\n \"\"\"Runs the appropriate prep ops, and requests running update ops.\"\"\"\r\n sess = run_context.session\r\n is_sweep_done = sess.run(self._is_sweep_done_var)\r\n if not self._is_initialized:\r\n logging.info(\"SweepHook running init op.\")\r\n sess.run(self._init_op)\r\n if is_sweep_done:\r\n logging.info(\"SweepHook starting the next sweep.\")\r\n sess.run(self._switch_op)\r\n is_row_sweep = sess.run(self._is_row_sweep_var)\r\n if is_sweep_done or not self._is_initialized:\r\n logging.info(\"SweepHook running prep ops for the {} sweep.\".format(\r\n \"row\" if is_row_sweep else \"col\"))\r\n prep_ops = self._row_prep_ops if is_row_sweep else self._col_prep_ops\r\n for prep_op in prep_ops:\r\n sess.run(prep_op)\r\n self._is_initialized = True\r\n logging.info(\"Next fit step starting.\")\r\n return session_run_hook.SessionRunArgs(\r\n fetches=[self._row_train_op if is_row_sweep else self._col_train_op])\r\n\r\n\r\nclass _IncrementGlobalStepHook(session_run_hook.SessionRunHook):\r\n \"\"\"Hook that increments the global step.\"\"\"\r\n\r\n def __init__(self):\r\n global_step = training_util.get_global_step()\r\n if global_step:\r\n self._global_step_incr_op = state_ops.assign_add(\r\n global_step, 1, name=\"global_step_incr\").op\r\n else:\r\n self._global_step_incr_op = None\r\n\r\n def before_run(self, run_context):\r\n if self._global_step_incr_op:\r\n run_context.session.run(self._global_step_incr_op)\r\n\r\n\r\nclass _StopAtSweepHook(session_run_hook.SessionRunHook):\r\n \"\"\"Hook that requests stop at a given sweep.\"\"\"\r\n\r\n def __init__(self, last_sweep):\r\n \"\"\"Initializes a `StopAtSweepHook`.\r\n\r\n This hook requests stop at a given sweep. 
Relies on the tensor named\r\n COMPLETED_SWEEPS in the default graph.\r\n\r\n Args:\r\n last_sweep: Integer, number of the last sweep to run.\r\n \"\"\"\r\n self._last_sweep = last_sweep\r\n\r\n def begin(self):\r\n try:\r\n self._completed_sweeps_var = ops.get_default_graph().get_tensor_by_name(\r\n WALSMatrixFactorization.COMPLETED_SWEEPS + \":0\")\r\n except KeyError:\r\n raise RuntimeError(WALSMatrixFactorization.COMPLETED_SWEEPS +\r\n \" counter should be created to use StopAtSweepHook.\")\r\n\r\n def before_run(self, run_context):\r\n return session_run_hook.SessionRunArgs(self._completed_sweeps_var)\r\n\r\n def after_run(self, run_context, run_values):\r\n completed_sweeps = run_values.results\r\n if completed_sweeps >= self._last_sweep:\r\n run_context.request_stop()\r\n\r\n\r\ndef _wals_factorization_model_function(features, labels, mode, params):\r\n \"\"\"Model function for the WALSFactorization estimator.\r\n\r\n Args:\r\n features: Dictionary of features. See WALSMatrixFactorization.\r\n labels: Must be None.\r\n mode: A model_fn.ModeKeys object.\r\n params: Dictionary of parameters containing arguments passed to the\r\n WALSMatrixFactorization constructor.\r\n\r\n Returns:\r\n A ModelFnOps object.\r\n\r\n Raises:\r\n ValueError: If `mode` is not recognized.\r\n \"\"\"\r\n assert labels is None\r\n use_factors_weights_cache = (params[\"use_factors_weights_cache_for_training\"]\r\n and mode == model_fn.ModeKeys.TRAIN)\r\n use_gramian_cache = (params[\"use_gramian_cache_for_training\"] and\r\n mode == model_fn.ModeKeys.TRAIN)\r\n max_sweeps = params[\"max_sweeps\"]\r\n model = factorization_ops.WALSModel(\r\n params[\"num_rows\"],\r\n params[\"num_cols\"],\r\n params[\"embedding_dimension\"],\r\n unobserved_weight=params[\"unobserved_weight\"],\r\n regularization=params[\"regularization_coeff\"],\r\n row_init=params[\"row_init\"],\r\n col_init=params[\"col_init\"],\r\n num_row_shards=params[\"num_row_shards\"],\r\n num_col_shards=params[\"num_col_shards\"],\r\n row_weights=params[\"row_weights\"],\r\n col_weights=params[\"col_weights\"],\r\n use_factors_weights_cache=use_factors_weights_cache,\r\n use_gramian_cache=use_gramian_cache)\r\n\r\n # Get input rows and cols. 
We either update rows or columns depending on\r\n # the value of row_sweep, which is maintained using a session hook.\r\n input_rows = features[WALSMatrixFactorization.INPUT_ROWS]\r\n input_cols = features[WALSMatrixFactorization.INPUT_COLS]\r\n\r\n # TRAIN mode:\r\n if mode == model_fn.ModeKeys.TRAIN:\r\n # Training consists of the following ops (controlled using a SweepHook).\r\n # Before a row sweep:\r\n # row_update_prep_gramian_op\r\n # initialize_row_update_op\r\n # During a row sweep:\r\n # update_row_factors_op\r\n # Before a col sweep:\r\n # col_update_prep_gramian_op\r\n # initialize_col_update_op\r\n # During a col sweep:\r\n # update_col_factors_op\r\n\r\n is_row_sweep_var = variable_scope.variable(\r\n True,\r\n trainable=False,\r\n name=\"is_row_sweep\",\r\n collections=[ops.GraphKeys.GLOBAL_VARIABLES])\r\n is_sweep_done_var = variable_scope.variable(\r\n False,\r\n trainable=False,\r\n name=\"is_sweep_done\",\r\n collections=[ops.GraphKeys.GLOBAL_VARIABLES])\r\n completed_sweeps_var = variable_scope.variable(\r\n 0,\r\n trainable=False,\r\n name=WALSMatrixFactorization.COMPLETED_SWEEPS,\r\n collections=[ops.GraphKeys.GLOBAL_VARIABLES])\r\n loss_var = variable_scope.variable(\r\n 0.,\r\n trainable=False,\r\n name=WALSMatrixFactorization.LOSS,\r\n collections=[ops.GraphKeys.GLOBAL_VARIABLES])\r\n # The root weighted squared error =\r\n # \\\\(\\sqrt( \\sum_{i,j} w_ij * (a_ij - r_ij)^2 / \\sum_{i,j} w_ij )\\\\)\r\n rwse_var = variable_scope.variable(\r\n 0.,\r\n trainable=False,\r\n name=WALSMatrixFactorization.RWSE,\r\n collections=[ops.GraphKeys.GLOBAL_VARIABLES])\r\n\r\n summary.scalar(\"loss\", loss_var)\r\n summary.scalar(\"root_weighted_squared_error\", rwse_var)\r\n summary.scalar(\"completed_sweeps\", completed_sweeps_var)\r\n\r\n def create_axis_ops(sp_input, num_items, update_fn, axis_name):\r\n \"\"\"Creates book-keeping and training ops for a given axis.\r\n\r\n Args:\r\n sp_input: A SparseTensor corresponding to the row or column batch.\r\n num_items: An integer, the total number of items of this axis.\r\n update_fn: A function that takes one argument (`sp_input`), and that\r\n returns a tuple of\r\n * new_factors: A float Tensor of the factor values after update.\r\n * update_op: a TensorFlow op which updates the factors.\r\n * loss: A float Tensor, the unregularized loss.\r\n * reg_loss: A float Tensor, the regularization loss.\r\n * sum_weights: A float Tensor, the sum of factor weights.\r\n axis_name: A string that specifies the name of the axis.\r\n\r\n Returns:\r\n A tuple consisting of:\r\n * reset_processed_items_op: A TensorFlow op, to be run before the\r\n beginning of any sweep. 
It marks all items as not-processed.\r\n * axis_train_op: A Tensorflow op, to be run during this axis' sweeps.\r\n \"\"\"\r\n processed_items_init = array_ops.fill(dims=[num_items], value=False)\r\n with ops.colocate_with(processed_items_init):\r\n processed_items = variable_scope.variable(\r\n processed_items_init,\r\n collections=[ops.GraphKeys.GLOBAL_VARIABLES],\r\n trainable=False,\r\n name=\"processed_\" + axis_name)\r\n _, update_op, loss, reg, sum_weights = update_fn(sp_input)\r\n input_indices = sp_input.indices[:, 0]\r\n with ops.control_dependencies([\r\n update_op,\r\n state_ops.assign(loss_var, loss + reg),\r\n state_ops.assign(rwse_var, math_ops.sqrt(loss / sum_weights))]):\r\n with ops.colocate_with(processed_items):\r\n update_processed_items = state_ops.scatter_update(\r\n processed_items,\r\n input_indices,\r\n array_ops.ones_like(input_indices, dtype=dtypes.bool),\r\n name=\"update_processed_{}_indices\".format(axis_name))\r\n with ops.control_dependencies([update_processed_items]):\r\n is_sweep_done = math_ops.reduce_all(processed_items)\r\n axis_train_op = control_flow_ops.group(\r\n state_ops.assign(is_sweep_done_var, is_sweep_done),\r\n state_ops.assign_add(\r\n completed_sweeps_var,\r\n math_ops.cast(is_sweep_done, dtypes.int32)),\r\n name=\"{}_sweep_train_op\".format(axis_name))\r\n return processed_items.initializer, axis_train_op\r\n\r\n reset_processed_rows_op, row_train_op = create_axis_ops(\r\n input_rows,\r\n params[\"num_rows\"],\r\n lambda x: model.update_row_factors(sp_input=x, transpose_input=False),\r\n \"rows\")\r\n reset_processed_cols_op, col_train_op = create_axis_ops(\r\n input_cols,\r\n params[\"num_cols\"],\r\n lambda x: model.update_col_factors(sp_input=x, transpose_input=True),\r\n \"cols\")\r\n switch_op = control_flow_ops.group(\r\n state_ops.assign(\r\n is_row_sweep_var, math_ops.logical_not(is_row_sweep_var)),\r\n reset_processed_rows_op,\r\n reset_processed_cols_op,\r\n name=\"sweep_switch_op\")\r\n row_prep_ops = [\r\n model.row_update_prep_gramian_op, model.initialize_row_update_op]\r\n col_prep_ops = [\r\n model.col_update_prep_gramian_op, model.initialize_col_update_op]\r\n init_op = model.worker_init\r\n sweep_hook = _SweepHook(\r\n is_row_sweep_var, is_sweep_done_var, init_op,\r\n row_prep_ops, col_prep_ops, row_train_op, col_train_op, switch_op)\r\n global_step_hook = _IncrementGlobalStepHook()\r\n training_hooks = [sweep_hook, global_step_hook]\r\n if max_sweeps is not None:\r\n training_hooks.append(_StopAtSweepHook(max_sweeps))\r\n\r\n return model_fn.ModelFnOps(\r\n mode=model_fn.ModeKeys.TRAIN,\r\n predictions={},\r\n loss=loss_var,\r\n eval_metric_ops={},\r\n train_op=control_flow_ops.no_op(),\r\n training_hooks=training_hooks)\r\n\r\n # INFER mode\r\n elif mode == model_fn.ModeKeys.INFER:\r\n projection_weights = features.get(\r\n WALSMatrixFactorization.PROJECTION_WEIGHTS)\r\n\r\n def get_row_projection():\r\n return model.project_row_factors(\r\n sp_input=input_rows,\r\n projection_weights=projection_weights,\r\n transpose_input=False)\r\n\r\n def get_col_projection():\r\n return model.project_col_factors(\r\n sp_input=input_cols,\r\n projection_weights=projection_weights,\r\n transpose_input=True)\r\n\r\n predictions = {\r\n WALSMatrixFactorization.PROJECTION_RESULT: control_flow_ops.cond(\r\n features[WALSMatrixFactorization.PROJECT_ROW],\r\n get_row_projection,\r\n get_col_projection)\r\n }\r\n\r\n return model_fn.ModelFnOps(\r\n mode=model_fn.ModeKeys.INFER,\r\n predictions=predictions,\r\n loss=None,\r\n 
eval_metric_ops={},\r\n train_op=control_flow_ops.no_op(),\r\n training_hooks=[])\r\n\r\n # EVAL mode\r\n elif mode == model_fn.ModeKeys.EVAL:\r\n def get_row_loss():\r\n _, _, loss, reg, _ = model.update_row_factors(\r\n sp_input=input_rows, transpose_input=False)\r\n return loss + reg\r\n def get_col_loss():\r\n _, _, loss, reg, _ = model.update_col_factors(\r\n sp_input=input_cols, transpose_input=True)\r\n return loss + reg\r\n loss = control_flow_ops.cond(\r\n features[WALSMatrixFactorization.PROJECT_ROW],\r\n get_row_loss,\r\n get_col_loss)\r\n return model_fn.ModelFnOps(\r\n mode=model_fn.ModeKeys.EVAL,\r\n predictions={},\r\n loss=loss,\r\n eval_metric_ops={},\r\n train_op=control_flow_ops.no_op(),\r\n training_hooks=[])\r\n\r\n else:\r\n raise ValueError(\"mode=%s is not recognized.\" % str(mode))\r\n\r\n\r\nclass WALSMatrixFactorization(estimator.Estimator):\r\n \"\"\"An Estimator for Weighted Matrix Factorization, using the WALS method.\r\n\r\n WALS (Weighted Alternating Least Squares) is an algorithm for weighted matrix\r\n factorization. It computes a low-rank approximation of a given sparse (n x m)\r\n matrix `A`, by a product of two matrices, `U * V^T`, where `U` is a (n x k)\r\n matrix and `V` is a (m x k) matrix. Here k is the rank of the approximation,\r\n also called the embedding dimension. We refer to `U` as the row factors, and\r\n `V` as the column factors.\r\n See tensorflow/contrib/factorization/g3doc/wals.md for the precise problem\r\n formulation.\r\n\r\n The training proceeds in sweeps: during a row_sweep, we fix `V` and solve for\r\n `U`. During a column sweep, we fix `U` and solve for `V`. Each one of these\r\n problems is an unconstrained quadratic minimization problem and can be solved\r\n exactly (it can also be solved in mini-batches, since the solution decouples\r\n across rows of each matrix).\r\n The alternating between sweeps is achieved by using a hook during training,\r\n which is responsible for keeping track of the sweeps and running preparation\r\n ops at the beginning of each sweep. It also updates the global_step variable,\r\n which keeps track of the number of batches processed since the beginning of\r\n training.\r\n The current implementation assumes that the training is run on a single\r\n machine, and will fail if `config.num_worker_replicas` is not equal to one.\r\n Training is done by calling `self.fit(input_fn=input_fn)`, where `input_fn`\r\n provides two tensors: one for rows of the input matrix, and one for rows of\r\n the transposed input matrix (i.e. columns of the original matrix). Note that\r\n during a row sweep, only row batches are processed (ignoring column batches)\r\n and vice-versa.\r\n Also note that every row (respectively every column) of the input matrix\r\n must be processed at least once for the sweep to be considered complete. In\r\n particular, training will not make progress if some rows are not generated by\r\n the `input_fn`.\r\n\r\n For prediction, given a new set of input rows `A'`, we compute a corresponding\r\n set of row factors `U'`, such that `U' * V^T` is a good approximation of `A'`.\r\n We call this operation a row projection. A similar operation is defined for\r\n columns. 
Projection is done by calling\r\n `self.get_projections(input_fn=input_fn)`, where `input_fn` satisfies the\r\n constraints given below.\r\n\r\n The input functions must satisfy the following constraints: Calling `input_fn`\r\n must return a tuple `(features, labels)` where `labels` is None, and\r\n `features` is a dict containing the following keys:\r\n\r\n TRAIN:\r\n * `WALSMatrixFactorization.INPUT_ROWS`: float32 SparseTensor (matrix).\r\n Rows of the input matrix to process (or to project).\r\n * `WALSMatrixFactorization.INPUT_COLS`: float32 SparseTensor (matrix).\r\n Columns of the input matrix to process (or to project), transposed.\r\n\r\n INFER:\r\n * `WALSMatrixFactorization.INPUT_ROWS`: float32 SparseTensor (matrix).\r\n Rows to project.\r\n * `WALSMatrixFactorization.INPUT_COLS`: float32 SparseTensor (matrix).\r\n Columns to project.\r\n * `WALSMatrixFactorization.PROJECT_ROW`: Boolean Tensor. Whether to project\r\n the rows or columns.\r\n * `WALSMatrixFactorization.PROJECTION_WEIGHTS` (Optional): float32 Tensor\r\n (vector). The weights to use in the projection.\r\n\r\n EVAL:\r\n * `WALSMatrixFactorization.INPUT_ROWS`: float32 SparseTensor (matrix).\r\n Rows to project.\r\n * `WALSMatrixFactorization.INPUT_COLS`: float32 SparseTensor (matrix).\r\n Columns to project.\r\n * `WALSMatrixFactorization.PROJECT_ROW`: Boolean Tensor. Whether to project\r\n the rows or columns.\r\n \"\"\"\r\n # Keys to be used in model_fn\r\n # Features keys\r\n INPUT_ROWS = \"input_rows\"\r\n INPUT_COLS = \"input_cols\"\r\n PROJECT_ROW = \"project_row\"\r\n PROJECTION_WEIGHTS = \"projection_weights\"\r\n # Predictions key\r\n PROJECTION_RESULT = \"projection\"\r\n # Name of the completed_sweeps variable\r\n COMPLETED_SWEEPS = \"completed_sweeps\"\r\n # Name of the loss variable\r\n LOSS = \"WALS_loss\"\r\n # Name of the Root Weighted Squared Error variable\r\n RWSE = \"WALS_RWSE\"\r\n\r\n def __init__(self,\r\n num_rows,\r\n num_cols,\r\n embedding_dimension,\r\n unobserved_weight=0.1,\r\n regularization_coeff=None,\r\n row_init=\"random\",\r\n col_init=\"random\",\r\n num_row_shards=1,\r\n num_col_shards=1,\r\n row_weights=1,\r\n col_weights=1,\r\n use_factors_weights_cache_for_training=True,\r\n use_gramian_cache_for_training=True,\r\n max_sweeps=None,\r\n model_dir=None,\r\n config=None):\r\n r\"\"\"Creates a model for matrix factorization using the WALS method.\r\n\r\n Args:\r\n num_rows: Total number of rows for input matrix.\r\n num_cols: Total number of cols for input matrix.\r\n embedding_dimension: Dimension to use for the factors.\r\n unobserved_weight: Weight of the unobserved entries of matrix.\r\n regularization_coeff: Weight of the L2 regularization term. Defaults to\r\n None, in which case the problem is not regularized.\r\n row_init: Initializer for row factor. Must be either:\r\n - A tensor: The row factor matrix is initialized to this tensor,\r\n - A numpy constant,\r\n - \"random\": The rows are initialized using a normal distribution.\r\n col_init: Initializer for column factor. See row_init.\r\n num_row_shards: Number of shards to use for the row factors.\r\n num_col_shards: Number of shards to use for the column factors.\r\n row_weights: Must be in one of the following three formats:\r\n - None: In this case, the weight of every entry is the unobserved_weight\r\n and the problem simplifies to ALS. Note that, in this case,\r\n col_weights must also be set to \"None\".\r\n - List of lists of non-negative scalars, of the form\r\n \\\\([[w_0, w_1, ...], [w_k, ... 
], [...]]\\\\),\r\n where the number of inner lists equal to the number of row factor\r\n shards and the elements in each inner list are the weights for the\r\n rows of that shard. In this case,\r\n \\\\(w_ij = unonbserved_weight + row_weights[i] * col_weights[j]\\\\).\r\n - A non-negative scalar: This value is used for all row weights.\r\n Note that it is allowed to have row_weights as a list and col_weights\r\n as a scalar, or vice-versa.\r\n col_weights: See row_weights.\r\n use_factors_weights_cache_for_training: Boolean, whether the factors and\r\n weights will be cached on the workers before the updates start, during\r\n training. Defaults to True.\r\n Note that caching is disabled during prediction.\r\n use_gramian_cache_for_training: Boolean, whether the Gramians will be\r\n cached on the workers before the updates start, during training.\r\n Defaults to True. Note that caching is disabled during prediction.\r\n max_sweeps: integer, optional. Specifies the number of sweeps for which\r\n to train the model, where a sweep is defined as a full update of all the\r\n row factors (resp. column factors).\r\n If `steps` or `max_steps` is also specified in model.fit(), training\r\n stops when either of the steps condition or sweeps condition is met.\r\n model_dir: The directory to save the model results and log files.\r\n config: A Configuration object. See Estimator.\r\n\r\n Raises:\r\n ValueError: If config.num_worker_replicas is strictly greater than one.\r\n The current implementation only supports running on a single worker.\r\n \"\"\"\r\n # TODO(walidk): Support power-law based weight computation.\r\n # TODO(walidk): Add factor lookup by indices, with caching.\r\n # TODO(walidk): Support caching during prediction.\r\n # TODO(walidk): Provide input pipelines that handle missing rows.\r\n\r\n params = {\r\n \"num_rows\":\r\n num_rows,\r\n \"num_cols\":\r\n num_cols,\r\n \"embedding_dimension\":\r\n embedding_dimension,\r\n \"unobserved_weight\":\r\n unobserved_weight,\r\n \"regularization_coeff\":\r\n regularization_coeff,\r\n \"row_init\":\r\n row_init,\r\n \"col_init\":\r\n col_init,\r\n \"num_row_shards\":\r\n num_row_shards,\r\n \"num_col_shards\":\r\n num_col_shards,\r\n \"row_weights\":\r\n row_weights,\r\n \"col_weights\":\r\n col_weights,\r\n \"max_sweeps\":\r\n max_sweeps,\r\n \"use_factors_weights_cache_for_training\":\r\n use_factors_weights_cache_for_training,\r\n \"use_gramian_cache_for_training\":\r\n use_gramian_cache_for_training\r\n }\r\n self._row_factors_names = [\r\n \"row_factors_shard_%d\" % i for i in range(num_row_shards)\r\n ]\r\n self._col_factors_names = [\r\n \"col_factors_shard_%d\" % i for i in range(num_col_shards)\r\n ]\r\n\r\n super(WALSMatrixFactorization, self).__init__(\r\n model_fn=_wals_factorization_model_function,\r\n params=params,\r\n model_dir=model_dir,\r\n config=config)\r\n\r\n if self._config is not None and self._config.num_worker_replicas > 1:\r\n raise ValueError(\"WALSMatrixFactorization must be run on a single worker \"\r\n \"replica.\")\r\n\r\n def get_row_factors(self):\r\n \"\"\"Returns the row factors of the model, loading them from checkpoint.\r\n\r\n Should only be run after training.\r\n\r\n Returns:\r\n A list of the row factors of the model.\r\n \"\"\"\r\n return [self.get_variable_value(name) for name in self._row_factors_names]\r\n\r\n def get_col_factors(self):\r\n \"\"\"Returns the column factors of the model, loading them from checkpoint.\r\n\r\n Should only be run after training.\r\n\r\n Returns:\r\n A list of the column 
factors of the model.\r\n \"\"\"\r\n return [self.get_variable_value(name) for name in self._col_factors_names]\r\n\r\n def get_projections(self, input_fn):\r\n \"\"\"Computes the projections of the rows or columns given in input_fn.\r\n\r\n Runs predict() with the given input_fn, and returns the results. Should only\r\n be run after training.\r\n\r\n Args:\r\n input_fn: Input function which specifies the rows or columns to project.\r\n Returns:\r\n A generator of the projected factors.\r\n \"\"\"\r\n return (result[WALSMatrixFactorization.PROJECTION_RESULT]\r\n for result in self.predict(input_fn=input_fn))\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Gradients for operators defined in random_ops.py.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_random_ops\r\nfrom tensorflow.python.ops import math_ops\r\n\r\n\r\ndef add_leading_unit_dimensions(x, num_dimensions):\r\n new_shape = array_ops.concat(\r\n [array_ops.ones([num_dimensions], dtype=dtypes.int32),\r\n array_ops.shape(x)], axis=0)\r\n return array_ops.reshape(x, new_shape)\r\n\r\n\r\[email protected](\"RandomGamma\")\r\ndef _RandomGammaGrad(op, grad): # pylint: disable=invalid-name\r\n \"\"\"Returns the gradient of a Gamma sample w.r.t. alpha.\r\n\r\n The gradient is computed using implicit differentiation, see\r\n \"Implicit Reparameterization Gradients\" (https://arxiv.org/abs/1805.08498).\r\n\r\n Args:\r\n op: A `RandomGamma` operation. We assume that the inputs to the operation\r\n are `shape` and `alpha` tensors, and the output is the `sample` tensor.\r\n grad: The incoming gradient `dloss / dsample` of the same shape as\r\n `op.outputs[0]`.\r\n\r\n Returns:\r\n A `Tensor` with derivatives `dloss / dalpha`\r\n \"\"\"\r\n shape = op.inputs[0]\r\n alpha = op.inputs[1]\r\n sample = op.outputs[0]\r\n\r\n with ops.control_dependencies([grad]):\r\n # Make the parameters alpha broadcastable with samples by appending\r\n # unit dimensions.\r\n num_sample_dimensions = array_ops.shape(shape)[0]\r\n alpha_broadcastable = add_leading_unit_dimensions(\r\n alpha, num_sample_dimensions)\r\n partial_a = gen_random_ops.random_gamma_grad(alpha_broadcastable, sample)\r\n\r\n # The first input is shape; the second input is alpha.\r\n return (None, math_ops.reduce_sum(\r\n grad * partial_a, axis=math_ops.range(num_sample_dimensions)))\r\n", "# Copyright 2016 The TensorFlow Authors. 
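# A minimal usage sketch of the WALSMatrixFactorization estimator defined
# above, assuming it is importable via tf.contrib.factorization. The 5x4
# ratings matrix, the number of steps and the model_dir are hypothetical
# placeholders; a real pipeline would batch rows/columns of a large sparse
# matrix rather than feeding it whole.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization import WALSMatrixFactorization

ratings = np.array([[5., 0., 3., 0.],
                    [4., 0., 0., 1.],
                    [0., 2., 0., 5.],
                    [1., 0., 4., 0.],
                    [0., 3., 0., 2.]], dtype=np.float32)

def _to_sparse(mat):
  indices = np.transpose(np.nonzero(mat)).astype(np.int64)
  return tf.SparseTensor(indices=indices,
                         values=mat[mat != 0.],
                         dense_shape=mat.shape)

def train_input_fn():
  # TRAIN mode expects rows of the matrix and rows of its transpose
  # (i.e. its columns), both as SparseTensors, and labels=None.
  features = {
      WALSMatrixFactorization.INPUT_ROWS: _to_sparse(ratings),
      WALSMatrixFactorization.INPUT_COLS: _to_sparse(ratings.T),
  }
  return features, None

estimator = WALSMatrixFactorization(
    num_rows=5, num_cols=4, embedding_dimension=2,
    regularization_coeff=0.01, model_dir="/tmp/wals_example")
estimator.fit(input_fn=train_input_fn, steps=20)
row_factors = estimator.get_row_factors()  # one (5, 2) array (single shard)
col_factors = estimator.get_col_factors()  # one (4, 2) array (single shard)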
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Export a TensorFlow model.\r\n\r\nSee: go/tf-exporter\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport re\r\nimport six\r\n\r\nfrom google.protobuf.any_pb2 import Any\r\n\r\nfrom tensorflow.contrib.session_bundle import constants\r\nfrom tensorflow.contrib.session_bundle import gc\r\nfrom tensorflow.contrib.session_bundle import manifest_pb2\r\nfrom tensorflow.core.framework import graph_pb2\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.platform import gfile\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.training import saver as tf_saver\r\nfrom tensorflow.python.training import training_util\r\nfrom tensorflow.python.util import compat\r\nfrom tensorflow.python.util.deprecation import deprecated\r\n\r\n\r\n@deprecated(\"2017-06-30\",\r\n \"No longer supported. Switch to SavedModel immediately.\")\r\ndef gfile_copy_callback(files_to_copy, export_dir_path):\r\n \"\"\"Callback to copy files using `gfile.copy` to an export directory.\r\n\r\n This method is used as the default `assets_callback` in `Exporter.init` to\r\n copy assets from the `assets_collection`. It can also be invoked directly to\r\n copy additional supplementary files into the export directory (in which case\r\n it is not a callback).\r\n\r\n Args:\r\n files_to_copy: A dictionary that maps original file paths to desired\r\n basename in the export directory.\r\n export_dir_path: Directory to copy the files to.\r\n \"\"\"\r\n logging.info(\"Write assets into: %s using gfile_copy.\", export_dir_path)\r\n gfile.MakeDirs(export_dir_path)\r\n for source_filepath, basename in files_to_copy.items():\r\n new_path = os.path.join(\r\n compat.as_bytes(export_dir_path), compat.as_bytes(basename))\r\n logging.info(\"Copying asset %s to path %s.\", source_filepath, new_path)\r\n\r\n if gfile.Exists(new_path):\r\n # Guard against being restarted while copying assets, and the file\r\n # existing and being in an unknown state.\r\n # TODO(b/28676216): Do some file checks before deleting.\r\n logging.info(\"Removing file %s.\", new_path)\r\n gfile.Remove(new_path)\r\n gfile.Copy(source_filepath, new_path)\r\n\r\n\r\n@deprecated(\"2017-06-30\",\r\n \"No longer supported. 
Switch to SavedModel immediately.\")\r\ndef regression_signature(input_tensor, output_tensor):\r\n \"\"\"Creates a regression signature.\r\n\r\n Args:\r\n input_tensor: Tensor specifying the input to a graph.\r\n output_tensor: Tensor specifying the output of a graph.\r\n\r\n Returns:\r\n A Signature message.\r\n \"\"\"\r\n signature = manifest_pb2.Signature()\r\n signature.regression_signature.input.tensor_name = input_tensor.name\r\n signature.regression_signature.output.tensor_name = output_tensor.name\r\n return signature\r\n\r\n\r\n@deprecated(\"2017-06-30\",\r\n \"No longer supported. Switch to SavedModel immediately.\")\r\ndef classification_signature(input_tensor,\r\n classes_tensor=None,\r\n scores_tensor=None):\r\n \"\"\"Creates a classification signature.\r\n\r\n Args:\r\n input_tensor: Tensor specifying the input to a graph.\r\n classes_tensor: Tensor specifying the output classes of a graph.\r\n scores_tensor: Tensor specifying the scores of the output classes.\r\n\r\n Returns:\r\n A Signature message.\r\n \"\"\"\r\n signature = manifest_pb2.Signature()\r\n signature.classification_signature.input.tensor_name = input_tensor.name\r\n if classes_tensor is not None:\r\n signature.classification_signature.classes.tensor_name = classes_tensor.name\r\n if scores_tensor is not None:\r\n signature.classification_signature.scores.tensor_name = scores_tensor.name\r\n return signature\r\n\r\n\r\n@deprecated(\"2017-06-30\",\r\n \"No longer supported. Switch to SavedModel immediately.\")\r\ndef generic_signature(name_tensor_map):\r\n \"\"\"Creates a generic signature of name to Tensor name.\r\n\r\n Args:\r\n name_tensor_map: Map from logical name to Tensor.\r\n\r\n Returns:\r\n A Signature message.\r\n \"\"\"\r\n signature = manifest_pb2.Signature()\r\n for name, tensor in six.iteritems(name_tensor_map):\r\n signature.generic_signature.map[name].tensor_name = tensor.name\r\n return signature\r\n\r\n\r\nclass Exporter(object):\r\n \"\"\"Exporter helps package a TensorFlow model for serving.\r\n\r\n Args:\r\n saver: Saver object.\r\n \"\"\"\r\n\r\n def __init__(self, saver):\r\n # Makes a copy of the saver-def and disables garbage-collection, since the\r\n # exporter enforces garbage-collection independently. Specifically, since\r\n # the exporter performs atomic copies of the saver output, it is required\r\n # that garbage-collection via the underlying saver be disabled.\r\n saver_def = saver.as_saver_def()\r\n saver_def.ClearField(\"max_to_keep\")\r\n self._saver = tf_saver.Saver(saver_def=saver_def)\r\n self._has_init = False\r\n self._assets_to_copy = {}\r\n\r\n @deprecated(\"2017-06-30\",\r\n \"No longer supported. Switch to SavedModel immediately.\")\r\n def init(self,\r\n graph_def=None,\r\n init_op=None,\r\n clear_devices=False,\r\n default_graph_signature=None,\r\n named_graph_signatures=None,\r\n assets_collection=None,\r\n assets_callback=gfile_copy_callback):\r\n \"\"\"Initialization.\r\n\r\n Args:\r\n graph_def: A GraphDef message of the graph to be used in inference.\r\n GraphDef of default graph is used when None.\r\n init_op: Op to be used in initialization.\r\n clear_devices: If device info of the graph should be cleared upon export.\r\n default_graph_signature: Default signature of the graph.\r\n named_graph_signatures: Map of named input/output signatures of the graph.\r\n assets_collection: A collection of constant asset filepath tensors. 
If set\r\n the assets will be exported into the asset directory.\r\n assets_callback: callback with two argument called during export with the\r\n list of files to copy and the asset path.\r\n Raises:\r\n RuntimeError: if init is called more than once.\r\n TypeError: if init_op is not an Operation or None.\r\n ValueError: if asset file path tensors are not non-empty constant string\r\n scalar tensors.\r\n \"\"\"\r\n # Avoid Dangerous default value []\r\n if named_graph_signatures is None:\r\n named_graph_signatures = {}\r\n assets = []\r\n if assets_collection:\r\n for asset_tensor in assets_collection:\r\n asset_filepath = self._file_path_value(asset_tensor)\r\n if not asset_filepath:\r\n raise ValueError(\"invalid asset filepath tensor %s\" % asset_tensor)\r\n basename = os.path.basename(asset_filepath)\r\n assets.append((basename, asset_tensor))\r\n self._assets_to_copy[asset_filepath] = basename\r\n\r\n if self._has_init:\r\n raise RuntimeError(\"init should be called only once\")\r\n self._has_init = True\r\n\r\n if graph_def or clear_devices:\r\n copy = graph_pb2.GraphDef()\r\n if graph_def:\r\n copy.CopyFrom(graph_def)\r\n else:\r\n copy.CopyFrom(ops.get_default_graph().as_graph_def())\r\n if clear_devices:\r\n for node in copy.node:\r\n node.device = \"\"\r\n graph_any_buf = Any()\r\n graph_any_buf.Pack(copy)\r\n ops.add_to_collection(constants.GRAPH_KEY, graph_any_buf)\r\n\r\n if init_op:\r\n if not isinstance(init_op, ops.Operation):\r\n raise TypeError(\"init_op needs to be an Operation: %s\" % init_op)\r\n ops.add_to_collection(constants.INIT_OP_KEY, init_op)\r\n\r\n signatures_proto = manifest_pb2.Signatures()\r\n if default_graph_signature:\r\n signatures_proto.default_signature.CopyFrom(default_graph_signature)\r\n for signature_name, signature in six.iteritems(named_graph_signatures):\r\n signatures_proto.named_signatures[signature_name].CopyFrom(signature)\r\n signatures_any_buf = Any()\r\n signatures_any_buf.Pack(signatures_proto)\r\n ops.add_to_collection(constants.SIGNATURES_KEY, signatures_any_buf)\r\n\r\n for filename, tensor in assets:\r\n asset = manifest_pb2.AssetFile()\r\n asset.filename = filename\r\n asset.tensor_binding.tensor_name = tensor.name\r\n asset_any_buf = Any()\r\n asset_any_buf.Pack(asset)\r\n ops.add_to_collection(constants.ASSETS_KEY, asset_any_buf)\r\n\r\n self._assets_callback = assets_callback\r\n\r\n @deprecated(\"2017-06-30\",\r\n \"No longer supported. Switch to SavedModel immediately.\")\r\n def export(self,\r\n export_dir_base,\r\n global_step_tensor,\r\n sess=None,\r\n exports_to_keep=None):\r\n \"\"\"Exports the model.\r\n\r\n Args:\r\n export_dir_base: A string path to the base export dir.\r\n global_step_tensor: An Tensor or tensor name providing the\r\n global step counter to append to the export directory path and set\r\n in the manifest version.\r\n sess: A Session to use to save the parameters.\r\n exports_to_keep: a gc.Path filter function used to determine the set of\r\n exports to keep. If set to None, all versions will be kept.\r\n\r\n Returns:\r\n The string path to the exported directory.\r\n\r\n Raises:\r\n RuntimeError: if init is not called.\r\n RuntimeError: if the export would overwrite an existing directory.\r\n \"\"\"\r\n if not self._has_init:\r\n raise RuntimeError(\"init must be called first\")\r\n\r\n # Export dir must not end with / or it will break exports to keep. 
Strip /.\r\n if export_dir_base.endswith(\"/\"):\r\n export_dir_base = export_dir_base[:-1]\r\n\r\n global_step = training_util.global_step(sess, global_step_tensor)\r\n export_dir = os.path.join(\r\n compat.as_bytes(export_dir_base),\r\n compat.as_bytes(constants.VERSION_FORMAT_SPECIFIER % global_step))\r\n\r\n # Prevent overwriting on existing exports which could lead to bad/corrupt\r\n # storage and loading of models. This is an important check that must be\r\n # done before any output files or directories are created.\r\n if gfile.Exists(export_dir):\r\n raise RuntimeError(\"Overwriting exports can cause corruption and are \"\r\n \"not allowed. Duplicate export dir: %s\" % export_dir)\r\n\r\n # Output to a temporary directory which is atomically renamed to the final\r\n # directory when complete.\r\n tmp_export_dir = compat.as_text(export_dir) + \"-tmp\"\r\n gfile.MakeDirs(tmp_export_dir)\r\n\r\n self._saver.save(\r\n sess,\r\n os.path.join(\r\n compat.as_text(tmp_export_dir),\r\n compat.as_text(constants.EXPORT_BASE_NAME)),\r\n meta_graph_suffix=constants.EXPORT_SUFFIX_NAME)\r\n\r\n # Run the asset callback.\r\n if self._assets_callback and self._assets_to_copy:\r\n assets_dir = os.path.join(\r\n compat.as_bytes(tmp_export_dir),\r\n compat.as_bytes(constants.ASSETS_DIRECTORY))\r\n gfile.MakeDirs(assets_dir)\r\n self._assets_callback(self._assets_to_copy, assets_dir)\r\n\r\n # TODO(b/27794910): Delete *checkpoint* file before rename.\r\n gfile.Rename(tmp_export_dir, export_dir)\r\n\r\n if exports_to_keep:\r\n # create a simple parser that pulls the export_version from the directory.\r\n def parser(path):\r\n if os.name == \"nt\":\r\n match = re.match(\r\n r\"^\" + export_dir_base.replace(\"\\\\\", \"/\") + r\"/(\\d{8})$\",\r\n path.path.replace(\"\\\\\", \"/\"))\r\n else:\r\n match = re.match(r\"^\" + export_dir_base + r\"/(\\d{8})$\", path.path)\r\n if not match:\r\n return None\r\n return path._replace(export_version=int(match.group(1)))\r\n\r\n paths_to_delete = gc.negation(exports_to_keep)\r\n for p in paths_to_delete(gc.get_paths(export_dir_base, parser=parser)):\r\n gfile.DeleteRecursively(p.path)\r\n\r\n return export_dir\r\n\r\n def _file_path_value(self, path_tensor):\r\n \"\"\"Returns the filepath value stored in constant `path_tensor`.\"\"\"\r\n if not isinstance(path_tensor, ops.Tensor):\r\n raise TypeError(\"tensor is not a Tensor\")\r\n if path_tensor.op.type != \"Const\":\r\n raise TypeError(\"Only constants tensor are supported\")\r\n if path_tensor.dtype != dtypes.string:\r\n raise TypeError(\"File paths should be string\")\r\n str_value = path_tensor.op.get_attr(\"value\").string_val\r\n if len(str_value) != 1:\r\n raise TypeError(\"Only scalar tensors are supported\")\r\n return str_value[0]\r\n", "# Copyright 2017 The TensorFlow Authors. 
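# A minimal usage sketch of the (deprecated) session_bundle Exporter defined
# above, assuming the module is importable as
# tensorflow.contrib.session_bundle.exporter. The toy linear model and the
# export path "/tmp/linear_export" are hypothetical placeholders.
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter

x = tf.placeholder(tf.float32, shape=[None, 1], name="x")
w = tf.Variable([[0.5]], name="w")
b = tf.Variable([0.0], name="b")
y = tf.matmul(x, w) + b
global_step = tf.Variable(0, name="global_step", trainable=False)

saver = tf.train.Saver()
model_exporter = exporter.Exporter(saver)
model_exporter.init(
    default_graph_signature=exporter.regression_signature(
        input_tensor=x, output_tensor=y),
    init_op=tf.variables_initializer([w, b, global_step]))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Writes the export under /tmp/linear_export/<8-digit global step>/, or
  # raises if that directory already exists.
  model_exporter.export("/tmp/linear_export", global_step, sess)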
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Utilities to warm-start TF.Learn Estimators.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport six\r\n\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import state_ops\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.ops import variables as variables_lib\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.training import checkpoint_ops\r\nfrom tensorflow.python.training import checkpoint_utils\r\nfrom tensorflow.python.training import saver as saver_lib\r\nfrom tensorflow.python.training.saving import saveable_object_util\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export(v1=[\"train.VocabInfo\"])\r\nclass VocabInfo(\r\n collections.namedtuple(\"VocabInfo\", [\r\n \"new_vocab\",\r\n \"new_vocab_size\",\r\n \"num_oov_buckets\",\r\n \"old_vocab\",\r\n \"old_vocab_size\",\r\n \"backup_initializer\",\r\n \"axis\",\r\n ])):\r\n \"\"\"Vocabulary information for warm-starting.\r\n\r\n See `tf.estimator.WarmStartSettings` for examples of using\r\n VocabInfo to warm-start.\r\n\r\n Args:\r\n new_vocab: [Required] A path to the new vocabulary file (used with the model\r\n to be trained).\r\n new_vocab_size: [Required] An integer indicating how many entries of the new\r\n vocabulary will used in training.\r\n num_oov_buckets: [Required] An integer indicating how many OOV buckets are\r\n associated with the vocabulary.\r\n old_vocab: [Required] A path to the old vocabulary file (used with the\r\n checkpoint to be warm-started from).\r\n old_vocab_size: [Optional] An integer indicating how many entries of the old\r\n vocabulary were used in the creation of the checkpoint. If not provided,\r\n the entire old vocabulary will be used.\r\n backup_initializer: [Optional] A variable initializer used for variables\r\n corresponding to new vocabulary entries and OOV. If not provided, these\r\n entries will be zero-initialized.\r\n axis: [Optional] Denotes what axis the vocabulary corresponds to. The\r\n default, 0, corresponds to the most common use case (embeddings or\r\n linear weights for binary classification / regression). 
An axis of 1\r\n could be used for warm-starting output layers with class vocabularies.\r\n\r\n Returns:\r\n A `VocabInfo` which represents the vocabulary information for warm-starting.\r\n\r\n Raises:\r\n ValueError: `axis` is neither 0 or 1.\r\n\r\n Example Usage:\r\n```python\r\n embeddings_vocab_info = tf.VocabInfo(\r\n new_vocab='embeddings_vocab',\r\n new_vocab_size=100,\r\n num_oov_buckets=1,\r\n old_vocab='pretrained_embeddings_vocab',\r\n old_vocab_size=10000,\r\n backup_initializer=tf.compat.v1.truncated_normal_initializer(\r\n mean=0.0, stddev=(1 / math.sqrt(embedding_dim))),\r\n axis=0)\r\n\r\n softmax_output_layer_kernel_vocab_info = tf.VocabInfo(\r\n new_vocab='class_vocab',\r\n new_vocab_size=5,\r\n num_oov_buckets=0, # No OOV for classes.\r\n old_vocab='old_class_vocab',\r\n old_vocab_size=8,\r\n backup_initializer=tf.compat.v1.glorot_uniform_initializer(),\r\n axis=1)\r\n\r\n softmax_output_layer_bias_vocab_info = tf.VocabInfo(\r\n new_vocab='class_vocab',\r\n new_vocab_size=5,\r\n num_oov_buckets=0, # No OOV for classes.\r\n old_vocab='old_class_vocab',\r\n old_vocab_size=8,\r\n backup_initializer=tf.compat.v1.zeros_initializer(),\r\n axis=0)\r\n\r\n #Currently, only axis=0 and axis=1 are supported.\r\n ```\r\n \"\"\"\r\n\r\n def __new__(cls,\r\n new_vocab,\r\n new_vocab_size,\r\n num_oov_buckets,\r\n old_vocab,\r\n old_vocab_size=-1,\r\n backup_initializer=None,\r\n axis=0):\r\n if axis != 0 and axis != 1:\r\n raise ValueError(\"The only supported values for the axis argument are 0 \"\r\n \"and 1. Provided axis: {}\".format(axis))\r\n\r\n return super(VocabInfo, cls).__new__(\r\n cls,\r\n new_vocab,\r\n new_vocab_size,\r\n num_oov_buckets,\r\n old_vocab,\r\n old_vocab_size,\r\n backup_initializer,\r\n axis,\r\n )\r\n\r\n\r\ndef _infer_var_name(var):\r\n \"\"\"Returns name of the `var`.\r\n\r\n Args:\r\n var: A list. The list can contain either of the following:\r\n (i) A single `Variable`\r\n (ii) A single `ResourceVariable`\r\n (iii) Multiple `Variable` objects which must be slices of the same larger\r\n variable.\r\n (iv) A single `PartitionedVariable`\r\n\r\n Returns:\r\n Name of the `var`\r\n \"\"\"\r\n name_to_var_dict = saveable_object_util.op_list_to_dict(var)\r\n if len(name_to_var_dict) > 1:\r\n raise TypeError(\"`var` = %s passed as arg violates the constraints. \"\r\n \"name_to_var_dict = %s\" % (var, name_to_var_dict))\r\n return list(name_to_var_dict.keys())[0]\r\n\r\n\r\ndef _get_var_info(var, prev_tensor_name=None):\r\n \"\"\"Helper method for standarizing Variable and naming.\r\n\r\n Args:\r\n var: Current graph's variable that needs to be warm-started (initialized).\r\n Can be either of the following: (i) `Variable` (ii) `ResourceVariable`\r\n (iii) list of `Variable`: The list must contain slices of the same larger\r\n variable. (iv) `PartitionedVariable`\r\n prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. 
If\r\n None, we lookup tensor with same name as given `var`.\r\n\r\n Returns:\r\n A tuple of the Tensor name and var.\r\n \"\"\"\r\n if checkpoint_utils._is_variable(var): # pylint: disable=protected-access\r\n current_var_name = _infer_var_name([var])\r\n elif (isinstance(var, list) and\r\n all(checkpoint_utils._is_variable(v) for v in var)): # pylint: disable=protected-access\r\n current_var_name = _infer_var_name(var)\r\n elif isinstance(var, variables_lib.PartitionedVariable):\r\n current_var_name = _infer_var_name([var])\r\n var = var._get_variable_list() # pylint: disable=protected-access\r\n else:\r\n raise TypeError(\r\n \"var MUST be one of the following: a Variable, list of Variable or \"\r\n \"PartitionedVariable, but is {}\".format(type(var)))\r\n if not prev_tensor_name:\r\n # Assume tensor name remains the same.\r\n prev_tensor_name = current_var_name\r\n\r\n return prev_tensor_name, var\r\n\r\n\r\n# pylint: disable=protected-access\r\n# Accesses protected members of tf.Variable to reset the variable's internal\r\n# state.\r\ndef _warm_start_var_with_vocab(var,\r\n current_vocab_path,\r\n current_vocab_size,\r\n prev_ckpt,\r\n prev_vocab_path,\r\n previous_vocab_size=-1,\r\n current_oov_buckets=0,\r\n prev_tensor_name=None,\r\n initializer=None,\r\n axis=0):\r\n \"\"\"Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.\r\n\r\n Use this method when the `var` is backed by vocabulary. This method stitches\r\n the given `var` such that values corresponding to individual features in the\r\n vocabulary remain consistent irrespective of changing order of the features\r\n between old and new vocabularies.\r\n\r\n Args:\r\n var: Current graph's variable that needs to be warm-started (initialized).\r\n Can be either of the following:\r\n (i) `Variable`\r\n (ii) `ResourceVariable`\r\n (iii) list of `Variable`: The list must contain slices of the same larger\r\n variable.\r\n (iv) `PartitionedVariable`\r\n current_vocab_path: Path to the vocab file used for the given `var`.\r\n current_vocab_size: An `int` specifying the number of entries in the current\r\n vocab.\r\n prev_ckpt: A string specifying the directory with checkpoint file(s) or path\r\n to checkpoint. The given checkpoint must have tensor with name\r\n `prev_tensor_name` (if not None) or tensor with name same as given `var`.\r\n prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`.\r\n previous_vocab_size: If provided, will constrain previous vocab to the first\r\n `previous_vocab_size` entries. -1 means use the entire previous vocab.\r\n current_oov_buckets: An `int` specifying the number of out-of-vocabulary\r\n buckets used for given `var`.\r\n prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If\r\n None, we lookup tensor with same name as given `var`.\r\n initializer: Variable initializer to be used for missing entries. 
If None,\r\n missing entries will be zero-initialized.\r\n axis: Axis of the variable that the provided vocabulary corresponds to.\r\n\r\n Raises:\r\n ValueError: If required args are not provided.\r\n \"\"\"\r\n if not (current_vocab_path and current_vocab_size and prev_ckpt and\r\n prev_vocab_path):\r\n raise ValueError(\"Invalid args: Must provide all of [current_vocab_path, \"\r\n \"current_vocab_size, prev_ckpt, prev_vocab_path}.\")\r\n if checkpoint_utils._is_variable(var):\r\n var = [var]\r\n elif (isinstance(var, list) and\r\n all(checkpoint_utils._is_variable(v) for v in var)):\r\n var = var\r\n elif isinstance(var, variables_lib.PartitionedVariable):\r\n var = var._get_variable_list()\r\n else:\r\n raise TypeError(\r\n \"var MUST be one of the following: a Variable, list of Variable or \"\r\n \"PartitionedVariable, but is {}\".format(type(var)))\r\n\r\n if not prev_tensor_name:\r\n # Assume tensor name remains the same.\r\n prev_tensor_name = _infer_var_name(var)\r\n\r\n total_v_first_axis = sum(v.get_shape().as_list()[0] for v in var)\r\n for v in var:\r\n v_shape = v.get_shape().as_list()\r\n slice_info = v._get_save_slice_info()\r\n partition_info = None\r\n if slice_info:\r\n partition_info = variable_scope._PartitionInfo(\r\n full_shape=slice_info.full_shape, var_offset=slice_info.var_offset)\r\n\r\n if axis == 0:\r\n new_row_vocab_size = current_vocab_size\r\n new_col_vocab_size = v_shape[1]\r\n old_row_vocab_size = previous_vocab_size\r\n old_row_vocab_file = prev_vocab_path\r\n new_row_vocab_file = current_vocab_path\r\n old_col_vocab_file = None\r\n new_col_vocab_file = None\r\n num_row_oov_buckets = current_oov_buckets\r\n num_col_oov_buckets = 0\r\n elif axis == 1:\r\n # Note that we must compute this value across all partitions, whereas\r\n # in the axis = 0 case, we can simply use v_shape[1] because we don't\r\n # allow partitioning across axis = 1.\r\n new_row_vocab_size = total_v_first_axis\r\n new_col_vocab_size = current_vocab_size\r\n old_row_vocab_size = -1\r\n old_row_vocab_file = None\r\n new_row_vocab_file = None\r\n old_col_vocab_file = prev_vocab_path\r\n new_col_vocab_file = current_vocab_path\r\n num_row_oov_buckets = 0\r\n num_col_oov_buckets = current_oov_buckets\r\n else:\r\n raise ValueError(\"The only supported values for the axis argument are 0 \"\r\n \"and 1. 
Provided axis: {}\".format(axis))\r\n\r\n init = checkpoint_ops._load_and_remap_matrix_initializer(\r\n ckpt_path=checkpoint_utils._get_checkpoint_filename(prev_ckpt),\r\n old_tensor_name=prev_tensor_name,\r\n new_row_vocab_size=new_row_vocab_size,\r\n new_col_vocab_size=new_col_vocab_size,\r\n old_row_vocab_size=old_row_vocab_size,\r\n old_row_vocab_file=old_row_vocab_file,\r\n new_row_vocab_file=new_row_vocab_file,\r\n old_col_vocab_file=old_col_vocab_file,\r\n new_col_vocab_file=new_col_vocab_file,\r\n num_row_oov_buckets=num_row_oov_buckets,\r\n num_col_oov_buckets=num_col_oov_buckets,\r\n initializer=initializer)\r\n new_init_val = ops.convert_to_tensor(\r\n init(shape=v_shape, partition_info=partition_info))\r\n v._initializer_op = state_ops.assign(v, new_init_val)\r\n\r\n\r\n# pylint: enable=protected-access\r\n\r\n\r\ndef _get_grouped_variables(vars_to_warm_start):\r\n \"\"\"Collects and groups (possibly partitioned) variables into a dictionary.\r\n\r\n The variables can be provided explicitly through vars_to_warm_start, or they\r\n are retrieved from collections (see below).\r\n\r\n Args:\r\n vars_to_warm_start: One of the following:\r\n\r\n - A regular expression (string) that captures which variables to\r\n warm-start (see tf.compat.v1.get_collection). This expression will\r\n only consider variables in the TRAINABLE_VARIABLES collection.\r\n - A list of strings, each representing a full variable name to warm-start.\r\n These will consider variables in GLOBAL_VARIABLES collection.\r\n - A list of Variables to warm-start.\r\n - `None`, in which case all variables in TRAINABLE_VARIABLES will be used.\r\n Returns:\r\n A dictionary mapping variable names (strings) to lists of Variables.\r\n Raises:\r\n ValueError: If vars_to_warm_start is not a string, `None`, a list of\r\n `Variables`, or a list of strings.\r\n \"\"\"\r\n if isinstance(vars_to_warm_start, str) or vars_to_warm_start is None:\r\n # Both vars_to_warm_start = '.*' and vars_to_warm_start = None will match\r\n # everything (in TRAINABLE_VARIABLES) here.\r\n logging.info(\"Warm-starting variables only in TRAINABLE_VARIABLES.\")\r\n list_of_vars = ops.get_collection(\r\n ops.GraphKeys.TRAINABLE_VARIABLES, scope=vars_to_warm_start)\r\n elif isinstance(vars_to_warm_start, list):\r\n if all(isinstance(v, str) for v in vars_to_warm_start):\r\n list_of_vars = []\r\n for v in vars_to_warm_start:\r\n list_of_vars += ops.get_collection(\r\n ops.GraphKeys.GLOBAL_VARIABLES, scope=v)\r\n elif all(checkpoint_utils._is_variable(v) for v in vars_to_warm_start): # pylint: disable=protected-access\r\n list_of_vars = vars_to_warm_start\r\n else:\r\n raise ValueError(\"If `vars_to_warm_start` is a list, it must be all \"\r\n \"`Variable` or all `str`. Given types are {}\".format(\r\n [type(v) for v in vars_to_warm_start]))\r\n else:\r\n raise ValueError(\"`vars_to_warm_start must be a `list` or `str`. 
Given \"\r\n \"type is {}\".format(type(vars_to_warm_start)))\r\n # We have to deal with partitioned variables, since get_collection flattens\r\n # out the list.\r\n grouped_variables = {}\r\n for v in list_of_vars:\r\n if not isinstance(v, list):\r\n var_name = _infer_var_name([v])\r\n else:\r\n var_name = _infer_var_name(v)\r\n grouped_variables.setdefault(var_name, []).append(v)\r\n\r\n return grouped_variables\r\n\r\n\r\ndef _get_object_checkpoint_renames(path, variable_names):\r\n \"\"\"Returns a dictionary mapping variable names to checkpoint keys.\r\n\r\n The warm-starting utility expects variable names to match with the variable\r\n names in the checkpoint. For object-based checkpoints, the variable names\r\n and names in the checkpoint are different. Thus, for object-based checkpoints,\r\n this function is used to obtain the map from variable names to checkpoint\r\n keys.\r\n\r\n Args:\r\n path: path to checkpoint directory or file.\r\n variable_names: list of variable names to load from the checkpoint.\r\n\r\n Returns:\r\n If the checkpoint is object-based, this function returns a map from variable\r\n names to their corresponding checkpoint keys.\r\n If the checkpoint is name-based, this returns an empty dict.\r\n\r\n Raises:\r\n ValueError: If the object-based checkpoint is missing variables.\r\n \"\"\"\r\n fname = checkpoint_utils._get_checkpoint_filename(path) # pylint: disable=protected-access\r\n try:\r\n names_to_keys = saver_lib.object_graph_key_mapping(fname)\r\n except errors.NotFoundError:\r\n # If an error is raised from `object_graph_key_mapping`, then the\r\n # checkpoint is name-based. There are no renames, so return an empty dict.\r\n return {}\r\n\r\n missing_names = set(variable_names) - set(names_to_keys.keys())\r\n if missing_names:\r\n raise ValueError(\r\n \"Attempting to warm-start from an object-based checkpoint, but found \"\r\n \"that the checkpoint did not contain values for all variables. The \"\r\n \"following variables were missing: {}\"\r\n .format(missing_names))\r\n return {name: names_to_keys[name] for name in variable_names}\r\n\r\n\r\n@tf_export(v1=[\"train.warm_start\"])\r\ndef warm_start(ckpt_to_initialize_from,\r\n vars_to_warm_start=\".*\",\r\n var_name_to_vocab_info=None,\r\n var_name_to_prev_var_name=None):\r\n \"\"\"Warm-starts a model using the given settings.\r\n\r\n If you are using a tf.estimator.Estimator, this will automatically be called\r\n during training.\r\n\r\n Args:\r\n ckpt_to_initialize_from: [Required] A string specifying the directory with\r\n checkpoint file(s) or path to checkpoint from which to warm-start the\r\n model parameters.\r\n vars_to_warm_start: [Optional] One of the following:\r\n\r\n - A regular expression (string) that captures which variables to\r\n warm-start (see tf.compat.v1.get_collection). This expression will only\r\n consider variables in the TRAINABLE_VARIABLES collection -- if you need\r\n to warm-start non_TRAINABLE vars (such as optimizer accumulators or\r\n batch norm statistics), please use the below option.\r\n - A list of strings, each a regex scope provided to\r\n tf.compat.v1.get_collection with GLOBAL_VARIABLES (please see\r\n tf.compat.v1.get_collection). For backwards compatibility reasons,\r\n this is separate from the single-string argument type.\r\n - A list of Variables to warm-start. 
If you do not have access to the\r\n `Variable` objects at the call site, please use the above option.\r\n - `None`, in which case only TRAINABLE variables specified in\r\n `var_name_to_vocab_info` will be warm-started.\r\n\r\n Defaults to `'.*'`, which warm-starts all variables in the\r\n TRAINABLE_VARIABLES collection. Note that this excludes variables such\r\n as accumulators and moving statistics from batch norm.\r\n var_name_to_vocab_info: [Optional] Dict of variable names (strings) to\r\n `tf.estimator.VocabInfo`. The variable names should be \"full\" variables,\r\n not the names of the partitions. If not explicitly provided, the variable\r\n is assumed to have no (changes to) vocabulary.\r\n var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to\r\n name of the previously-trained variable in `ckpt_to_initialize_from`. If\r\n not explicitly provided, the name of the variable is assumed to be same\r\n between previous checkpoint and current model. Note that this has no\r\n effect on the set of variables that is warm-started, and only controls\r\n name mapping (use `vars_to_warm_start` for controlling what variables to\r\n warm-start).\r\n\r\n Raises:\r\n ValueError: If the WarmStartSettings contains prev_var_name or VocabInfo\r\n configuration for variable names that are not used. This is to ensure\r\n a stronger check for variable configuration than relying on users to\r\n examine the logs.\r\n \"\"\"\r\n logging.info(\"Warm-starting from: {}\".format(ckpt_to_initialize_from))\r\n grouped_variables = _get_grouped_variables(vars_to_warm_start)\r\n\r\n if var_name_to_vocab_info is None:\r\n var_name_to_vocab_info = {}\r\n\r\n if not var_name_to_prev_var_name:\r\n # Detect whether the checkpoint is object-based, in which case the\r\n # var_name_to_prev_var_name dictionary should map variable names to\r\n # checkpoint keys. If the user has specified var_name_to_prev_var_name, we\r\n # do not override it.\r\n var_name_to_prev_var_name = _get_object_checkpoint_renames(\r\n ckpt_to_initialize_from, grouped_variables.keys())\r\n\r\n warmstarted_count = 0\r\n\r\n # Keep track of which var_names in var_name_to_prev_var_name and\r\n # var_name_to_vocab_info have been used. Err on the safer side by throwing an\r\n # exception if any are unused by the end of the loop. 
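A minimal end-to-end sketch of `tf.compat.v1.train.warm_start` as documented above, with no vocabulary remapping. The scope name ("dense") and the temporary checkpoint location are arbitrary choices for this example.

```python
import os
import tempfile
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
ckpt_dir = tempfile.mkdtemp()

# 1) Stand-in for a previously trained model: save one variable to a checkpoint.
with tf.Graph().as_default(), tf.Session() as sess:
    with tf.variable_scope("dense"):
        tf.get_variable("kernel", initializer=tf.constant([[1.0, 2.0], [3.0, 4.0]]))
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.Saver().save(sess, os.path.join(ckpt_dir, "model.ckpt"))

# 2) New model: warm-start TRAINABLE variables whose names match the regex.
with tf.Graph().as_default(), tf.Session() as sess:
    with tf.variable_scope("dense"):
        kernel = tf.get_variable("kernel", shape=[2, 2])
    tf.train.warm_start(ckpt, vars_to_warm_start=".*dense.*")
    sess.run(tf.global_variables_initializer())
    print(sess.run(kernel))  # [[1. 2.] [3. 4.]], loaded from the checkpoint
```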
It is easy to misname\r\n # a variable during this configuration, in which case without this check, we\r\n # would fail to warm-start silently.\r\n prev_var_name_used = set()\r\n vocab_info_used = set()\r\n\r\n # Group the vocabless vars into one call to init_from_checkpoint.\r\n vocabless_vars = {}\r\n for var_name, variable in six.iteritems(grouped_variables):\r\n prev_var_name = var_name_to_prev_var_name.get(var_name)\r\n if prev_var_name:\r\n prev_var_name_used.add(var_name)\r\n vocab_info = var_name_to_vocab_info.get(var_name)\r\n if vocab_info:\r\n vocab_info_used.add(var_name)\r\n warmstarted_count += 1\r\n logging.debug(\r\n \"Warm-starting variable: {}; current_vocab: {} current_vocab_size: {}\"\r\n \" prev_vocab: {} prev_vocab_size: {} current_oov: {} prev_tensor: {}\"\r\n \" initializer: {}\".format(\r\n var_name, vocab_info.new_vocab, vocab_info.new_vocab_size,\r\n vocab_info.old_vocab, (vocab_info.old_vocab_size if\r\n vocab_info.old_vocab_size > 0 else \"All\"),\r\n vocab_info.num_oov_buckets, prev_var_name or \"Unchanged\",\r\n vocab_info.backup_initializer or \"zero-initialized\"))\r\n _warm_start_var_with_vocab(\r\n variable,\r\n current_vocab_path=vocab_info.new_vocab,\r\n current_vocab_size=vocab_info.new_vocab_size,\r\n prev_ckpt=ckpt_to_initialize_from,\r\n prev_vocab_path=vocab_info.old_vocab,\r\n previous_vocab_size=vocab_info.old_vocab_size,\r\n current_oov_buckets=vocab_info.num_oov_buckets,\r\n prev_tensor_name=prev_var_name,\r\n initializer=vocab_info.backup_initializer,\r\n axis=vocab_info.axis)\r\n else:\r\n # For the special value of vars_to_warm_start = None,\r\n # we only warm-start variables with explicitly specified vocabularies.\r\n if vars_to_warm_start:\r\n warmstarted_count += 1\r\n logging.debug(\"Warm-starting variable: {}; prev_var_name: {}\".format(\r\n var_name, prev_var_name or \"Unchanged\"))\r\n # Because we use a default empty list in grouped_variables, single\r\n # unpartitioned variables will be lists here, which we rectify in order\r\n # for init_from_checkpoint logic to work correctly.\r\n if len(variable) == 1:\r\n variable = variable[0]\r\n prev_tensor_name, var = _get_var_info(variable, prev_var_name)\r\n vocabless_vars[prev_tensor_name] = var\r\n\r\n checkpoint_utils.init_from_checkpoint(ckpt_to_initialize_from, vocabless_vars)\r\n prev_var_name_not_used = set(\r\n var_name_to_prev_var_name.keys()) - prev_var_name_used\r\n vocab_info_not_used = set(var_name_to_vocab_info.keys()) - vocab_info_used\r\n\r\n logging.info(\"Warm-started %d variables.\", warmstarted_count)\r\n\r\n if prev_var_name_not_used:\r\n raise ValueError(\r\n \"You provided the following variables in \"\r\n \"var_name_to_prev_var_name that were not used: \"\r\n \"{0}. Perhaps you misspelled them? Here is the list of viable \"\r\n \"variable names: {1}\".format(prev_var_name_not_used,\r\n grouped_variables.keys()))\r\n if vocab_info_not_used:\r\n raise ValueError(\r\n \"You provided the following variables in \"\r\n \"var_name_to_vocab_info that were not used: {0}. \"\r\n \" Perhaps you misspelled them? Here is the list of viable variable \"\r\n \"names: {1}\".format(vocab_info_not_used, grouped_variables.keys()))\r\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"TensorArray: a dynamically sized array of Tensors.\"\"\"\r\n# Mixture of pep8 and non-pep8 names, so disable pylint bad-name\r\n# pylint: disable=g-bad-name\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport contextlib\r\nimport traceback\r\nimport weakref\r\n\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import errors_impl\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.framework import tensor_spec\r\nfrom tensorflow.python.framework import tensor_util\r\nfrom tensorflow.python.framework import type_spec\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_util\r\nfrom tensorflow.python.ops import gen_control_flow_ops\r\nfrom tensorflow.python.ops import gen_data_flow_ops\r\nfrom tensorflow.python.ops import list_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.util import tf_should_use\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n# _GraphTensorArray accesses many of the hidden generated ops, but is in\r\n# fact built to wrap these methods.\r\n# pylint: disable=protected-access\r\nclass _GraphTensorArray(object):\r\n \"\"\"Graph-mode implementation of TensorArray.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n dtype,\r\n size=None,\r\n dynamic_size=None,\r\n clear_after_read=None,\r\n tensor_array_name=None,\r\n handle=None,\r\n flow=None,\r\n infer_shape=True,\r\n element_shape=None,\r\n colocate_with_first_write_call=True,\r\n name=None):\r\n \"\"\"Constructs a graph mode TensorArray.\r\n\r\n Args:\r\n dtype: (required) data type of the TensorArray.\r\n size: (optional) int32 scalar `Tensor`: the size of the TensorArray.\r\n Required if handle is not provided.\r\n dynamic_size: (optional) Python bool: If true, writes to the TensorArray\r\n can grow the TensorArray past its initial size. Default: False.\r\n clear_after_read: Boolean (optional, default: True). If True, clear\r\n TensorArray values after reading them. This disables read-many\r\n semantics, but allows early release of memory.\r\n tensor_array_name: (optional) Python string: the name of the TensorArray.\r\n This is used when creating the TensorArray handle. If this value is\r\n set, handle should be None.\r\n handle: (optional) A `Tensor` handle to an existing TensorArray. If this\r\n is set, tensor_array_name should be None. Only supported in graph mode.\r\n flow: (optional) A float `Tensor` scalar coming from an existing\r\n `TensorArray.flow`. 
Only supported in graph mode.\r\n infer_shape: (optional, default: True) If True, shape inference\r\n is enabled. In this case, all elements must have the same shape.\r\n element_shape: (optional, default: None) A `TensorShape` object specifying\r\n the shape constraints of each of the elements of the TensorArray.\r\n Need not be fully defined.\r\n colocate_with_first_write_call: If `True`, the TensorArray will be\r\n colocated on the same device as the Tensor used on its first write\r\n (write operations include `write`, `unstack`, and `split`). If `False`,\r\n the TensorArray will be placed on the device determined by the\r\n device context available during its initialization.\r\n name: A name for the operation (optional).\r\n\r\n Raises:\r\n ValueError: if both handle and tensor_array_name are provided.\r\n TypeError: if handle is provided but is not a Tensor.\r\n \"\"\"\r\n if handle is not None and tensor_array_name:\r\n raise ValueError(\r\n \"Cannot construct with both handle and tensor_array_name\")\r\n if handle is not None and not isinstance(handle, ops.Tensor):\r\n raise TypeError(\"Handle must be a Tensor\")\r\n if handle is None and size is None:\r\n raise ValueError(\"Size must be provided if handle is not provided\")\r\n if handle is not None and size is not None:\r\n raise ValueError(\"Cannot provide both a handle and size \"\r\n \"at the same time\")\r\n if handle is not None and element_shape is not None:\r\n raise ValueError(\"Cannot provide both a handle and element_shape \"\r\n \"at the same time\")\r\n if handle is not None and dynamic_size is not None:\r\n raise ValueError(\"Cannot provide both a handle and dynamic_size \"\r\n \"at the same time\")\r\n if handle is not None and clear_after_read is not None:\r\n raise ValueError(\"Cannot provide both a handle and clear_after_read \"\r\n \"at the same time\")\r\n\r\n if clear_after_read is None:\r\n clear_after_read = True\r\n self._dynamic_size = dynamic_size or False\r\n self._dtype = dtypes.as_dtype(dtype).base_dtype\r\n\r\n # Used to keep track of what tensors the TensorArray should be\r\n # colocated with. We choose to colocate the TensorArray with the\r\n # first tensor written to it.\r\n self._colocate_with_first_write_call = colocate_with_first_write_call\r\n if colocate_with_first_write_call:\r\n self._colocate_with = []\r\n else:\r\n self._colocate_with = None\r\n\r\n # Record the current static shape for the array elements. The element\r\n # shape is defined either by `element_shape` or the shape of the tensor\r\n # of the first write. If `infer_shape` is true, all writes checks for\r\n # shape equality.\r\n self._element_shape = [tensor_shape.as_shape(element_shape)]\r\n self._infer_shape = infer_shape\r\n with ops.name_scope(name, \"TensorArray\", [handle, size, flow]) as scope:\r\n if handle is not None:\r\n self._handle = handle\r\n if flow is None:\r\n raise ValueError(\"flow must not be None if handle is not None.\")\r\n self._flow = flow\r\n else:\r\n # Construct the TensorArray with an empty device. 
The first\r\n # write into the TensorArray from a Tensor with a set device\r\n # will retroactively set the device value of this op.\r\n def create():\r\n \"\"\"Create the TensorArray op.\"\"\"\r\n return gen_data_flow_ops.tensor_array_v3(\r\n dtype=dtype,\r\n size=size,\r\n element_shape=element_shape,\r\n identical_element_shapes=infer_shape,\r\n dynamic_size=self._dynamic_size,\r\n clear_after_read=clear_after_read,\r\n tensor_array_name=tensor_array_name,\r\n name=scope)\r\n if colocate_with_first_write_call:\r\n with ops.device(None), ops.colocate_with(None, ignore_existing=True):\r\n self._handle, self._flow = create()\r\n else:\r\n self._handle, self._flow = create()\r\n\r\n @property\r\n def flow(self):\r\n return self._flow\r\n\r\n @property\r\n def dtype(self):\r\n return self._dtype\r\n\r\n @property\r\n def handle(self):\r\n return self._handle\r\n\r\n @property\r\n def element_shape(self):\r\n return self._element_shape[0]\r\n\r\n def _check_element_shape(self, shape):\r\n \"\"\"Changes the element shape of the array given a shape to merge with.\r\n\r\n Args:\r\n shape: A `TensorShape` object to merge with.\r\n\r\n Raises:\r\n ValueError: if the provided shape is incompatible with the current\r\n element shape of the `TensorArray`.\r\n \"\"\"\r\n if not shape.is_compatible_with(self.element_shape):\r\n raise ValueError(\"Inconsistent shapes: saw %s but expected %s \" %\r\n (shape, self.element_shape))\r\n if self._infer_shape:\r\n self._element_shape[0] = self.element_shape.merge_with(shape)\r\n\r\n @contextlib.contextmanager\r\n def _maybe_colocate_with(self, value):\r\n \"\"\"Colocate operations with an internal colocation group or `value`.\r\n\r\n Args:\r\n value: `Tensor`, the tensor to try to colocate with.\r\n\r\n Yields:\r\n Does not yield anything, but the new context is a colocation context.\r\n\r\n If no internal colocation group is set, colocate with `value` and set\r\n the internal colocation group to be value.\r\n \"\"\"\r\n if not self._colocate_with_first_write_call:\r\n yield\r\n else:\r\n if not self._colocate_with:\r\n self._colocate_with.append(value)\r\n with ops.colocate_with(self._colocate_with[0]):\r\n yield\r\n\r\n def identity(self):\r\n \"\"\"See TensorArray.\"\"\"\r\n flow = array_ops.identity(self._flow)\r\n return build_ta_with_new_flow(self, flow)\r\n\r\n def grad(self, source, flow=None, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n # tensor_array_grad requires a flow input when forward\r\n # TensorArrays are dynamically sized. 
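A short sketch of what the `_check_element_shape` bookkeeping above enforces, seen through the public `tf.TensorArray` front-end (assuming TF 2.x, eager execution; the eager implementation later in this file applies the same compatibility check on write): with `infer_shape=True`, the element shape is locked in by the first write and later writes must be compatible with it.

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=2)           # infer_shape=True by default
ta = ta.write(0, tf.constant([1.0, 2.0, 3.0]))    # element shape becomes [3]

try:
    ta.write(1, tf.constant([[1.0, 2.0]]))        # a rank-2 value is incompatible
except ValueError as err:
    print(err)  # Incompatible shape for value ...
```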
This forces the creation\r\n # of the grad TensorArray only once the final forward array's size\r\n # is fixed.\r\n if flow is None:\r\n flow = self.flow\r\n with ops.name_scope(name, \"TensorArrayGrad\", [self._handle]):\r\n with ops.colocate_with(self._handle):\r\n g_handle, unused_flow = gen_data_flow_ops.tensor_array_grad_v3(\r\n handle=self._handle, source=source, flow_in=flow, name=name)\r\n with ops.control_dependencies([g_handle]):\r\n flow = array_ops.identity(flow, name=\"gradient_flow\")\r\n g = TensorArray(\r\n dtype=self._dtype,\r\n handle=g_handle,\r\n flow=flow,\r\n infer_shape=self._infer_shape,\r\n colocate_with_first_write_call=False)\r\n # pylint: disable=protected-access\r\n g._implementation._element_shape = self._element_shape\r\n # pylint: enable=protected-access\r\n return g\r\n\r\n def read(self, index, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n value = gen_data_flow_ops.tensor_array_read_v3(\r\n handle=self._handle,\r\n index=index,\r\n flow_in=self._flow,\r\n dtype=self._dtype,\r\n name=name)\r\n if self._element_shape:\r\n value.set_shape(self._element_shape[0].dims)\r\n return value\r\n\r\n @tf_should_use.should_use_result\r\n def write(self, index, value, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArrayWrite\", [self._handle, index, value]):\r\n # TODO(b/129870929): Fix after all callers provide proper init dtype.\r\n value = ops.convert_to_tensor(\r\n value, preferred_dtype=self._dtype, name=\"value\")\r\n _check_dtypes(value, self._dtype)\r\n self._check_element_shape(value.shape)\r\n with self._maybe_colocate_with(value):\r\n flow_out = gen_data_flow_ops.tensor_array_write_v3(\r\n handle=self._handle,\r\n index=index,\r\n value=value,\r\n flow_in=self._flow,\r\n name=name)\r\n return build_ta_with_new_flow(self, flow_out)\r\n\r\n def stack(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.colocate_with(self._handle):\r\n with ops.name_scope(name, \"TensorArrayStack\", [self._handle]):\r\n return self.gather(math_ops.range(0, self.size()), name=name)\r\n\r\n def gather(self, indices, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n if self._element_shape:\r\n element_shape = self._element_shape[0]\r\n else:\r\n element_shape = tensor_shape.unknown_shape(None)\r\n value = gen_data_flow_ops.tensor_array_gather_v3(\r\n handle=self._handle,\r\n indices=indices,\r\n flow_in=self._flow,\r\n dtype=self._dtype,\r\n name=name,\r\n element_shape=element_shape)\r\n if self.element_shape:\r\n value.set_shape([None] + self.element_shape.dims)\r\n return value\r\n\r\n def concat(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n value, _ = gen_data_flow_ops.tensor_array_concat_v3(\r\n handle=self._handle,\r\n flow_in=self._flow,\r\n dtype=self._dtype,\r\n name=name,\r\n element_shape_except0=self.element_shape[1:])\r\n if self.element_shape:\r\n value.set_shape([None] + self.element_shape.dims[1:])\r\n return value\r\n\r\n @tf_should_use.should_use_result\r\n def unstack(self, value, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArrayUnstack\", [self._handle, value]):\r\n num_elements = array_ops.shape(value)[0]\r\n return self.scatter(\r\n indices=math_ops.range(0, num_elements), value=value, name=name)\r\n\r\n @tf_should_use.should_use_result\r\n def scatter(self, indices, value, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArrayScatter\",\r\n [self._handle, value, indices]):\r\n # TODO(b/129870929): Fix after all callers provide 
proper init dtype.\r\n value = ops.convert_to_tensor(\r\n value, preferred_dtype=self._dtype, name=\"value\")\r\n _check_dtypes(value, self._dtype)\r\n if not context.executing_eagerly():\r\n self._check_element_shape(value.shape[1:])\r\n with self._maybe_colocate_with(value):\r\n flow_out = gen_data_flow_ops.tensor_array_scatter_v3(\r\n handle=self._handle,\r\n indices=indices,\r\n value=value,\r\n flow_in=self._flow,\r\n name=name)\r\n return build_ta_with_new_flow(self, flow_out)\r\n\r\n @tf_should_use.should_use_result\r\n def split(self, value, lengths, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArraySplit\",\r\n [self._handle, value, lengths]):\r\n value = ops.convert_to_tensor(value, dtype=self._dtype, name=\"value\")\r\n with self._maybe_colocate_with(value):\r\n lengths_64 = math_ops.cast(lengths, dtypes.int64)\r\n if not context.executing_eagerly():\r\n clengths = tensor_util.constant_value(lengths_64)\r\n if value.shape.dims is not None and clengths is not None:\r\n if clengths.shape and clengths.max() == clengths.min():\r\n self._check_element_shape(\r\n tensor_shape.TensorShape([clengths[0]]).concatenate(\r\n value.shape[1:]))\r\n flow_out = gen_data_flow_ops.tensor_array_split_v3(\r\n handle=self._handle,\r\n value=value,\r\n lengths=lengths_64,\r\n flow_in=self._flow,\r\n name=name)\r\n return build_ta_with_new_flow(self, flow_out)\r\n\r\n def size(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n return gen_data_flow_ops.tensor_array_size_v3(\r\n handle=self._handle, flow_in=self.flow, name=name)\r\n\r\n @tf_should_use.should_use_result\r\n def close(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n return gen_data_flow_ops.tensor_array_close_v3(\r\n handle=self._handle, name=name)\r\n\r\n\r\nclass _GraphTensorArrayV2(object):\r\n \"\"\"Graph-mode implementation of TensorArray backed by TensorLists.\r\n\r\n The backing tensor of this TensorArray is a TensorList variant tensor which is\r\n stored in the `flow`. The `handle` is always none here. The reason we use the\r\n `flow` field and not the `handle` field is to ensure backwards compatibility\r\n with legacy control flow.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n dtype,\r\n size=None,\r\n dynamic_size=None,\r\n clear_after_read=None,\r\n tensor_array_name=None,\r\n handle=None,\r\n flow=None,\r\n infer_shape=True,\r\n element_shape=None,\r\n colocate_with_first_write_call=True,\r\n name=None):\r\n \"\"\"Constructs a graph mode TensorArray.\r\n\r\n Args:\r\n dtype: (required) data type of the TensorArray.\r\n size: (optional) int32 scalar `Tensor`: the size of the TensorArray.\r\n Required if flow is not provided.\r\n dynamic_size: (optional) Python bool: If true, writes to the TensorArray\r\n can grow the TensorArray past its initial size. Default: False.\r\n clear_after_read: (optional) unused. Not supported in TensorLists.\r\n tensor_array_name: (optional) unused.\r\n handle: (optional) Must always be None.\r\n flow: (optional) A variant `Tensor` scalar for a TensorList.\r\n infer_shape: (optional, default: True) If True, shape inference is\r\n enabled. In this case, all elements must have the same shape.\r\n element_shape: (optional, default: None) A `TensorShape` object specifying\r\n the shape constraints of each of the elements of the TensorArray. Need\r\n not be fully defined.\r\n colocate_with_first_write_call: (optional). 
unused.\r\n name: (optional) A name for the operation.\r\n\r\n Raises:\r\n ValueError: if both handle and tensor_array_name are provided.\r\n TypeError: if handle is provided but is not a Tensor.\r\n \"\"\"\r\n assert handle is None\r\n del handle\r\n del clear_after_read\r\n del tensor_array_name\r\n del colocate_with_first_write_call\r\n\r\n self._dynamic_size = dynamic_size\r\n\r\n if (flow is not None and\r\n (not isinstance(flow, ops.Tensor) or flow.dtype != dtypes.variant)):\r\n raise TypeError(\"flow must be a variant tensor\")\r\n if flow is None and size is None:\r\n raise ValueError(\"Size must be provided if flow is not provided\")\r\n if flow is not None and size is not None:\r\n raise ValueError(\"Cannot provide both a flow and size \"\r\n \"at the same time\")\r\n if flow is not None and element_shape is not None:\r\n raise ValueError(\"Cannot provide both a flow and element_shape \"\r\n \"at the same time\")\r\n\r\n self._dtype = dtypes.as_dtype(dtype).base_dtype\r\n\r\n # Record the current static shape for the array elements. The element\r\n # shape is defined either by `element_shape` or the shape of the tensor\r\n # of the first write. If `infer_shape` is true, all writes checks for\r\n # shape equality.\r\n self._element_shape = [tensor_shape.as_shape(element_shape)]\r\n self._infer_shape = infer_shape\r\n with ops.name_scope(name, \"TensorArrayV2\", [size, flow]) as scope:\r\n if flow is None:\r\n self._flow = list_ops.tensor_list_reserve(\r\n element_shape=element_shape,\r\n num_elements=size,\r\n element_dtype=dtype,\r\n name=scope)\r\n else:\r\n self._flow = flow\r\n\r\n # For backwards compatibility.\r\n self._colocate_with_first_write_call = None\r\n self._colocate_with = None\r\n\r\n @property\r\n def flow(self):\r\n return self._flow\r\n\r\n @property\r\n def dtype(self):\r\n return self._dtype\r\n\r\n @property\r\n def element_shape(self):\r\n return self._element_shape[0]\r\n\r\n @property\r\n def handle(self):\r\n # We intentionally do not raise an error so that legacy while_loop does not\r\n # complain.\r\n return None\r\n\r\n def _check_element_shape(self, shape):\r\n \"\"\"Changes the element shape of the array given a shape to merge with.\r\n\r\n Args:\r\n shape: A `TensorShape` object to merge with.\r\n\r\n Raises:\r\n ValueError: if the provided shape is incompatible with the current\r\n element shape of the `TensorArray`.\r\n \"\"\"\r\n if not shape.is_compatible_with(self.element_shape):\r\n raise ValueError(\"Inconsistent shapes: saw %s but expected %s \" %\r\n (shape, self.element_shape))\r\n if self._infer_shape:\r\n self._element_shape[0] = self.element_shape.merge_with(shape)\r\n\r\n def identity(self):\r\n \"\"\"See TensorArray.\"\"\"\r\n flow = array_ops.identity(self._flow)\r\n return build_ta_with_new_flow(self, flow)\r\n\r\n def grad(self, source, flow=None, name=None):\r\n \"\"\"Not supported.\"\"\"\r\n raise NotImplementedError()\r\n\r\n def read(self, index, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArrayV2Read\", [self._flow, index]):\r\n value = list_ops.tensor_list_get_item(\r\n input_handle=self._flow,\r\n index=index,\r\n element_dtype=self._dtype,\r\n element_shape=self.element_shape,\r\n name=name)\r\n return value\r\n\r\n @tf_should_use.should_use_result\r\n def write(self, index, value, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArrayV2Write\", [self._flow, index, value]):\r\n # TODO(b/129870929): Fix after all callers provide proper init dtype.\r\n 
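An illustrative sketch (assuming TF 2.x defaults): a `tf.TensorArray` built inside a `tf.function` is routed to this TensorList-backed class, so its `flow` is a variant tensor holding a TensorList and its `handle` is `None`.

```python
import tensorflow as tf

@tf.function
def squares(n):
    ta = tf.TensorArray(tf.float32, size=n)            # graph mode, control flow v2
    for i in tf.range(n):                               # autograph -> while_loop
        ta = ta.write(i, tf.cast(i, tf.float32) ** 2)   # write returns a new TensorArray
    return ta.stack()

print(squares(tf.constant(4)))  # tf.Tensor([0. 1. 4. 9.], shape=(4,), dtype=float32)
```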
value = ops.convert_to_tensor(\r\n value, preferred_dtype=self._dtype, name=\"value\")\r\n _check_dtypes(value, self._dtype)\r\n self._check_element_shape(value.shape)\r\n flow_out = list_ops.tensor_list_set_item(\r\n input_handle=self._flow,\r\n index=index,\r\n item=value,\r\n resize_if_index_out_of_bounds=self._dynamic_size,\r\n name=name)\r\n return build_ta_with_new_flow(self, flow_out)\r\n\r\n def stack(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArrayV2Stack\", [self._flow]):\r\n value = list_ops.tensor_list_stack(\r\n input_handle=self._flow,\r\n element_dtype=self._dtype,\r\n element_shape=self.element_shape)\r\n return value\r\n\r\n def gather(self, indices, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n value = list_ops.tensor_list_gather(\r\n input_handle=self._flow,\r\n indices=indices,\r\n element_dtype=self._dtype,\r\n element_shape=self.element_shape,\r\n name=name)\r\n return value\r\n\r\n def concat(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n if self.element_shape:\r\n element_shape = [None] + self.element_shape.dims[1:]\r\n else:\r\n element_shape = None\r\n\r\n value = list_ops.tensor_list_concat(\r\n input_handle=self._flow,\r\n element_dtype=self._dtype,\r\n element_shape=element_shape,\r\n name=name)\r\n return value\r\n\r\n @tf_should_use.should_use_result\r\n def unstack(self, value, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArrayUnstack\", [self._flow, value]):\r\n # TODO(b/129870929): Fix after all callers provide proper init dtype.\r\n value = ops.convert_to_tensor(\r\n value, preferred_dtype=self._dtype, name=\"value\")\r\n _check_dtypes(value, self._dtype)\r\n self._check_element_shape(value.shape[1:])\r\n flow_out = list_ops.tensor_list_from_tensor(\r\n tensor=value, element_shape=value.shape[1:])\r\n return build_ta_with_new_flow(self, flow_out)\r\n\r\n @tf_should_use.should_use_result\r\n def scatter(self, indices, value, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArrayScatter\",\r\n [self._flow, value, indices]):\r\n # TODO(b/129870929): Fix after all callers provide proper init dtype.\r\n value = ops.convert_to_tensor(\r\n value, preferred_dtype=self._dtype, name=\"value\")\r\n _check_dtypes(value, self._dtype)\r\n self._check_element_shape(value.shape[1:])\r\n flow_out = list_ops.tensor_list_scatter(\r\n tensor=value, indices=indices, element_shape=self.element_shape,\r\n input_handle=self._flow)\r\n return build_ta_with_new_flow(self, flow_out)\r\n\r\n @tf_should_use.should_use_result\r\n def split(self, value, lengths, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n with ops.name_scope(name, \"TensorArraySplit\", [self._flow, value, lengths]):\r\n # TODO(b/129870929): Fix after all callers provide proper init dtype.\r\n value = ops.convert_to_tensor(\r\n value, preferred_dtype=self._dtype, name=\"value\")\r\n _check_dtypes(value, self._dtype)\r\n lengths_64 = math_ops.cast(lengths, dtypes.int64)\r\n if not context.executing_eagerly():\r\n clengths = tensor_util.constant_value(lengths_64)\r\n if value.shape.dims is not None and clengths is not None:\r\n if clengths.shape and clengths.max() == clengths.min():\r\n self._check_element_shape(\r\n tensor_shape.TensorShape([clengths[0]]).concatenate(\r\n value.shape[1:]))\r\n flow_out = list_ops.tensor_list_split(\r\n tensor=value,\r\n lengths=lengths_64,\r\n element_shape=self.element_shape,\r\n name=name)\r\n return build_ta_with_new_flow(self, flow_out)\r\n\r\n def 
size(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n return list_ops.tensor_list_length(input_handle=self._flow, name=name)\r\n\r\n @tf_should_use.should_use_result\r\n def close(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n return gen_control_flow_ops.no_op(name=name)\r\n\r\n# pylint: enable=protected-access\r\n\r\n\r\nclass _EagerTensorArray(object):\r\n \"\"\"Eager-compatible implementation of TensorArray.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n dtype,\r\n size=None,\r\n dynamic_size=None,\r\n clear_after_read=None,\r\n tensor_array_name=None,\r\n handle=None,\r\n flow=None,\r\n infer_shape=True,\r\n element_shape=None,\r\n colocate_with_first_write_call=True,\r\n name=None):\r\n \"\"\"Constructs a TensorArray compatible with eager execution.\r\n\r\n Args:\r\n dtype: (required) data type of the TensorArray.\r\n size: (optional) int32 scalar `Tensor`: the size of the TensorArray.\r\n Required if handle is not provided.\r\n dynamic_size: (optional) Python bool: If true, writes to the TensorArray\r\n can grow the TensorArray past its initial size. Default: False.\r\n clear_after_read: Boolean (optional, default: True). If True, clear\r\n TensorArray values after reading them. This disables read-many\r\n semantics, but allows early release of memory.\r\n tensor_array_name: unused.\r\n handle: unsupported.\r\n flow: unsupported.\r\n infer_shape: used for error checking, same semantics as TensorArray.\r\n element_shape: used for error checking, same semantics as TensorArray.\r\n colocate_with_first_write_call: unsupported.\r\n name: unsupported.\r\n\r\n Raises:\r\n ValueError: handle or flow are supplied, or if size is not supplied.\r\n \"\"\"\r\n\r\n del (flow, tensor_array_name, name) # Unused.\r\n\r\n if handle is not None:\r\n raise ValueError(\"TensorArray handles are not supported when eager \"\r\n \"execution is enabled.\")\r\n if size is None:\r\n raise ValueError(\"Size must be declared for TensorArrays when eager \"\r\n \"execution is enabled.\")\r\n\r\n # These attributes are not meaningful when eager is enabled, but some\r\n # library functions (e.g., those in control_flow_ops.py) access them to\r\n # create new tensor arrays; as such, we define them for the sake of\r\n # compatibility.\r\n self._handle = None\r\n # we assign a dummy value to _flow in case other code assumes it to be\r\n # a Tensor\r\n self._flow = constant_op.constant(0, dtype=dtypes.int32)\r\n self._infer_shape = infer_shape\r\n self._element_shape = tensor_shape.as_shape(element_shape)\r\n self._colocate_with_first_write_call = colocate_with_first_write_call\r\n\r\n self._dtype = dtypes.as_dtype(dtype).base_dtype\r\n self._dynamic_size = dynamic_size or False\r\n self._clear_after_read = (\r\n True if clear_after_read is None else clear_after_read)\r\n self._previously_read_indices = []\r\n\r\n if isinstance(size, ops.EagerTensor):\r\n size = size.numpy()\r\n self._tensor_array = [None for _ in range(size)]\r\n\r\n @property\r\n def flow(self):\r\n \"\"\"For compatibility; flows are not meaningful when eager is enabled.\"\"\"\r\n return self._flow\r\n\r\n @property\r\n def dtype(self):\r\n return self._dtype\r\n\r\n @property\r\n def handle(self):\r\n \"\"\"For compatibility; handles are not meaningful when eager is enabled.\"\"\"\r\n return self._handle\r\n\r\n @property\r\n def element_shape(self):\r\n return self._element_shape\r\n\r\n def identity(self):\r\n \"\"\"See TensorArray.\"\"\"\r\n return self.parent()\r\n\r\n def grad(self, source, flow=None, name=None):\r\n raise 
NotImplementedError(\r\n \"TensorArray.grad is not supported when executing eagerly; eager's \"\r\n \"gradient implementation does not use/need this function to compute \"\r\n \"gradients of operations that use TensorArrays.\")\r\n\r\n def read(self, index, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n del name # not meaningful when executing eagerly.\r\n\r\n if isinstance(index, ops.EagerTensor):\r\n index = index.numpy()\r\n\r\n if index < 0:\r\n raise errors_impl.OutOfRangeError(\r\n None, None,\r\n \"Reading from negative indices (index %d) is not allowed.\" % index)\r\n\r\n if index >= len(self._tensor_array):\r\n raise errors_impl.OutOfRangeError(\r\n None, None, \"Tried to read from index %d but array size is: %d\" %\r\n (index, len(self._tensor_array)))\r\n\r\n tensor = self._tensor_array[index]\r\n if tensor is None:\r\n if index in self._previously_read_indices:\r\n raise errors_impl.InvalidArgumentError(\r\n None, None,\r\n \"Could not read index %d twice because it was cleared after \"\r\n \"a previous read (perhaps try setting clear_after_read = false?)\" %\r\n index)\r\n else:\r\n tensor = self._maybe_zero(index)\r\n\r\n if self._clear_after_read:\r\n self._tensor_array[index] = None\r\n self._previously_read_indices.append(index)\r\n return tensor\r\n\r\n def _write(self, index, value):\r\n \"\"\"Writes `value` into index named by `index`.\r\n\r\n Args:\r\n index: 0-D. int32 scalar with the index to write to.\r\n value: N-D. Tensor of type `dtype`. The `Tensor` to write to `index`.\r\n\r\n Raises:\r\n errors_impl.InvalidArgumentError: `value` dtype does not match dtype.\r\n errors_impl.OutOfRangeError: `index` is out of bounds.\r\n ValueError: shape of `value` is not consistent with inferred shape.\r\n \"\"\"\r\n\r\n if isinstance(index, ops.EagerTensor):\r\n index = index.numpy()\r\n\r\n if index < 0:\r\n raise errors_impl.OutOfRangeError(\r\n None, None,\r\n \"Writing to negative indices (index %d) is not allowed.\" % index)\r\n\r\n size = len(self._tensor_array)\r\n if index >= size:\r\n if not self._dynamic_size:\r\n raise errors_impl.OutOfRangeError(\r\n None, None,\r\n \"Tried to write to index %d but array is not resizeable and size \"\r\n \"is: %d\" % (index, size))\r\n self._tensor_array.extend([None for _ in range(index - size + 1)])\r\n\r\n if not isinstance(value, ops.EagerTensor):\r\n # TODO(b/129870929): Fix after all callers provide proper init dtype.\r\n value = ops.convert_to_tensor(\r\n value, preferred_dtype=self._dtype, name=\"value\")\r\n\r\n if self._dtype != value.dtype:\r\n raise errors_impl.InvalidArgumentError(\r\n None, None,\r\n \"TensorArray dtype is %s but Op is trying to write dtype %s\" %\r\n (self._dtype.name, value.dtype.name))\r\n\r\n if not self._element_shape.is_compatible_with(value.shape):\r\n raise ValueError(\"Incompatible shape for value (%s), expected (%s)\" %\r\n (value.shape, self._element_shape))\r\n\r\n if self._infer_shape:\r\n self._element_shape = self._element_shape.merge_with(value.shape)\r\n\r\n self._tensor_array[index] = value\r\n\r\n def write(self, index, value, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n del name # not meaningful when executing eagerly.\r\n self._write(index, value)\r\n return self.parent()\r\n\r\n def _maybe_zero(self, ix):\r\n val = self._tensor_array[ix]\r\n if val is None:\r\n val = self._tensor_array[ix] = array_ops.zeros(\r\n shape=self._element_shape, dtype=self._dtype)\r\n return val\r\n\r\n def stack(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n if self._tensor_array:\r\n for 
ix in range(len(self._tensor_array)):\r\n self._maybe_zero(ix)\r\n return ops.convert_to_tensor(\r\n self._tensor_array, name=name, dtype=self._dtype)\r\n\r\n def gather(self, indices, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n del name # not meaningful when executing eagerly.\r\n if isinstance(indices, ops.EagerTensor):\r\n indices = indices.numpy()\r\n return array_ops.stack([self._maybe_zero(i) for i in indices])\r\n\r\n def concat(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n try:\r\n return array_ops.concat(\r\n [self._maybe_zero(ix) for ix in range(len(self._tensor_array))],\r\n 0, name=name)\r\n except errors_impl.OpError:\r\n # Reproduce a subset of the error-handling for graph-mode TensorArrays.\r\n shapes = [t.shape for t in self._tensor_array]\r\n ndims = [s.ndims for s in shapes]\r\n if 0 in ndims:\r\n idx = ndims.index(0)\r\n raise errors_impl.InvalidArgumentError(\r\n None, None, \"Concat saw a scalar shape at index %d but requires \"\r\n \"at least vectors.\" % idx)\r\n else:\r\n raise\r\n\r\n def unstack(self, value, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n tensors = array_ops.unstack(value, name=name)\r\n if len(tensors) > len(self._tensor_array) and not self._dynamic_size:\r\n raise ValueError(\r\n \"Cannot unstack %d tensors into a TensorArray of static size %d\" %\r\n (len(tensors), len(self._tensor_array)))\r\n self._tensor_array = tensors\r\n return self.parent()\r\n\r\n def scatter(self, indices, value, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n del name # not meaningful when executing eagerly.\r\n if isinstance(indices, ops.EagerTensor):\r\n indices = indices.numpy()\r\n for index, val in zip(indices, array_ops.unstack(value)):\r\n self._write(index, val) # pylint: disable=protected-access\r\n return self.parent()\r\n\r\n def split(self, value, lengths, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n # TODO(b/129870929): Fix after all callers provide proper init dtype.\r\n value = ops.convert_to_tensor(\r\n value, preferred_dtype=self._dtype, name=\"value\")\r\n _check_dtypes(value, self._dtype)\r\n lengths = ops.convert_to_tensor(lengths)\r\n sum_lengths = math_ops.reduce_sum(lengths)\r\n if lengths.shape.ndims != 1:\r\n raise errors_impl.InvalidArgumentError(\r\n None, None, \"Expected lengths to be a vector, received shape: %s\" %\r\n lengths.shape.as_list())\r\n elif value.shape.ndims == 0:\r\n raise errors_impl.InvalidArgumentError(\r\n None, None, \"Expected value to be at least a vector, \"\r\n \"but received shape: %s\" % value.shape.as_list())\r\n elif sum_lengths.numpy() != value.shape.as_list()[0]:\r\n raise errors_impl.InvalidArgumentError(\r\n None, None, \"Expected sum of lengths to be equal to \"\r\n \"values.shape[0], but sum of lengths is %d and \"\r\n \"value's shape is: %s \" % (sum_lengths.numpy(),\r\n value.shape.as_list()))\r\n elif not self._dynamic_size and lengths.shape[0] != len(self._tensor_array):\r\n raise errors_impl.InvalidArgumentError(\r\n None, None, \"TensorArray's size is not equal to the size of \"\r\n \"lengths (%d vs. 
%d), and the TensorArray is not marked as \"\r\n \"dynamically resizeable\" % (len(self._tensor_array),\r\n lengths.shape[0]))\r\n else:\r\n self._tensor_array = array_ops.split(value, lengths, name=name)\r\n return self.parent()\r\n\r\n def size(self, name=None):\r\n \"\"\"See TensorArray.\"\"\"\r\n del name # not meaningful when executing eagerly.\r\n return constant_op.constant(len(self._tensor_array))\r\n\r\n def close(self, name=None):\r\n del name # not meaningful when executing eagerly.\r\n del self._tensor_array[:]\r\n\r\n\r\n# TensorArray is designed to hide an underlying implementation object\r\n# and as such accesses many of that object's hidden fields.\r\n# pylint: disable=protected-access\r\n@tf_export(\"TensorArray\")\r\nclass TensorArray(object):\r\n \"\"\"Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.\r\n\r\n This class is meant to be used with dynamic iteration primitives such as\r\n `while_loop` and `map_fn`. It supports gradient back-propagation via special\r\n \"flow\" control flow dependencies.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n dtype,\r\n size=None,\r\n dynamic_size=None,\r\n clear_after_read=None,\r\n tensor_array_name=None,\r\n handle=None,\r\n flow=None,\r\n infer_shape=True,\r\n element_shape=None,\r\n colocate_with_first_write_call=True,\r\n name=None):\r\n \"\"\"Construct a new TensorArray or wrap an existing TensorArray handle.\r\n\r\n A note about the parameter `name`:\r\n\r\n The name of the `TensorArray` (even if passed in) is uniquified: each time\r\n a new `TensorArray` is created at runtime it is assigned its own name for\r\n the duration of the run. This avoids name collisions if a `TensorArray`\r\n is created within a `while_loop`.\r\n\r\n Args:\r\n dtype: (required) data type of the TensorArray.\r\n size: (optional) int32 scalar `Tensor`: the size of the TensorArray.\r\n Required if handle is not provided.\r\n dynamic_size: (optional) Python bool: If true, writes to the TensorArray\r\n can grow the TensorArray past its initial size. Default: False.\r\n clear_after_read: Boolean (optional, default: True). If True, clear\r\n TensorArray values after reading them. This disables read-many\r\n semantics, but allows early release of memory.\r\n tensor_array_name: (optional) Python string: the name of the TensorArray.\r\n This is used when creating the TensorArray handle. If this value is\r\n set, handle should be None.\r\n handle: (optional) A `Tensor` handle to an existing TensorArray. If this\r\n is set, tensor_array_name should be None. Only supported in graph mode.\r\n flow: (optional) A float `Tensor` scalar coming from an existing\r\n `TensorArray.flow`. Only supported in graph mode.\r\n infer_shape: (optional, default: True) If True, shape inference\r\n is enabled. In this case, all elements must have the same shape.\r\n element_shape: (optional, default: None) A `TensorShape` object specifying\r\n the shape constraints of each of the elements of the TensorArray.\r\n Need not be fully defined.\r\n colocate_with_first_write_call: If `True`, the TensorArray will be\r\n colocated on the same device as the Tensor used on its first write\r\n (write operations include `write`, `unstack`, and `split`). 
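A small sketch of the `dynamic_size` flag documented above, run eagerly (assuming TF 2.x): a write past the current size grows the array instead of raising, and unwritten slots read back as zeros when the array is stacked.

```python
import tensorflow as tf

ta = tf.TensorArray(tf.int32, size=1, dynamic_size=True)
ta = ta.write(0, 10)
ta = ta.write(3, 40)          # index beyond the initial size: the array grows to 4

print(ta.size().numpy())      # 4
print(ta.stack().numpy())     # [10  0  0 40]
```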
If `False`,\r\n the TensorArray will be placed on the device determined by the\r\n device context available during its initialization.\r\n name: A name for the operation (optional).\r\n\r\n Raises:\r\n ValueError: if both handle and tensor_array_name are provided.\r\n TypeError: if handle is provided but is not a Tensor.\r\n \"\"\"\r\n if (context.executing_eagerly() and\r\n (flow is None or flow.dtype != dtypes.variant)):\r\n # It is possible to create a Variant-style TensorArray even in eager mode,\r\n # and this is fine but can have performance implications in eager.\r\n # An example of when this happens is if a tf.function returns a\r\n # TensorArray in its output; its flow variant object is returned to Eager.\r\n # This can be wrapped back up in a Variant-style TensorArray.\r\n implementation = _EagerTensorArray\r\n elif (flow is not None and flow.dtype == dtypes.variant or\r\n control_flow_util.EnableControlFlowV2(ops.get_default_graph())):\r\n implementation = _GraphTensorArrayV2\r\n else:\r\n implementation = _GraphTensorArray\r\n self._implementation = implementation(\r\n dtype,\r\n size=size,\r\n dynamic_size=dynamic_size,\r\n clear_after_read=clear_after_read,\r\n tensor_array_name=tensor_array_name,\r\n handle=handle,\r\n flow=flow,\r\n infer_shape=infer_shape,\r\n element_shape=element_shape,\r\n colocate_with_first_write_call=colocate_with_first_write_call,\r\n name=name)\r\n\r\n self._implementation.parent = weakref.ref(self)\r\n\r\n @property\r\n def flow(self):\r\n \"\"\"The flow `Tensor` forcing ops leading to this TensorArray state.\"\"\"\r\n return self._implementation._flow\r\n\r\n @property\r\n def dtype(self):\r\n \"\"\"The data type of this TensorArray.\"\"\"\r\n return self._implementation._dtype\r\n\r\n @property\r\n def handle(self):\r\n \"\"\"The reference to the TensorArray.\"\"\"\r\n return self._implementation.handle\r\n\r\n @property\r\n def element_shape(self):\r\n \"\"\"The `tf.TensorShape` of elements in this TensorArray.\"\"\"\r\n return self._implementation.element_shape\r\n\r\n @property\r\n def dynamic_size(self):\r\n \"\"\"Python bool; if `True` the TensorArray can grow dynamically.\"\"\"\r\n return self._implementation._dynamic_size\r\n\r\n @property\r\n def _infer_shape(self):\r\n # TODO(slebedev): consider making public or changing TensorArrayStructure\r\n # to access _implementation directly. Note that dynamic_size is also\r\n # only used by TensorArrayStructure.\r\n return self._implementation._infer_shape\r\n\r\n def identity(self):\r\n \"\"\"Returns a TensorArray with the same content and properties.\r\n\r\n Returns:\r\n A new TensorArray object with flow that ensures the control dependencies\r\n from the contexts will become control dependencies for writes, reads, etc.\r\n Use this object all for subsequent operations.\r\n \"\"\"\r\n return self._implementation.identity()\r\n\r\n def grad(self, source, flow=None, name=None):\r\n return self._implementation.grad(source, flow=flow, name=name)\r\n\r\n def read(self, index, name=None):\r\n \"\"\"Read the value at location `index` in the TensorArray.\r\n\r\n Args:\r\n index: 0-D. int32 tensor with the index to read from.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n The tensor at index `index`.\r\n \"\"\"\r\n return self._implementation.read(index, name=name)\r\n\r\n def write(self, index, value, name=None):\r\n \"\"\"Write `value` into index `index` of the TensorArray.\r\n\r\n Args:\r\n index: 0-D. int32 scalar with the index to write to.\r\n value: N-D. 
Tensor of type `dtype`. The Tensor to write to this index.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A new TensorArray object with flow that ensures the write occurs.\r\n Use this object all for subsequent operations.\r\n\r\n Raises:\r\n ValueError: if there are more writers than specified.\r\n \"\"\"\r\n return self._implementation.write(index, value, name=name)\r\n\r\n def stack(self, name=None):\r\n \"\"\"Return the values in the TensorArray as a stacked `Tensor`.\r\n\r\n All of the values must have been written and their shapes must all match.\r\n If input shapes have rank-`R`, then output shape will have rank-`(R+1)`.\r\n\r\n Args:\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n All the tensors in the TensorArray stacked into one tensor.\r\n \"\"\"\r\n return self._implementation.stack(name=name)\r\n\r\n def gather(self, indices, name=None):\r\n \"\"\"Return selected values in the TensorArray as a packed `Tensor`.\r\n\r\n All of selected values must have been written and their shapes\r\n must all match.\r\n\r\n Args:\r\n indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If\r\n the `TensorArray` is not dynamic, `max_value=size()`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n The tensors in the `TensorArray` selected by `indices`, packed into one\r\n tensor.\r\n \"\"\"\r\n return self._implementation.gather(indices, name=name)\r\n\r\n def concat(self, name=None):\r\n \"\"\"Return the values in the TensorArray as a concatenated `Tensor`.\r\n\r\n All of the values must have been written, their ranks must match, and\r\n and their shapes must all match for all dimensions except the first.\r\n\r\n Args:\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n All the tensors in the TensorArray concatenated into one tensor.\r\n \"\"\"\r\n return self._implementation.concat(name=name)\r\n\r\n @tf_should_use.should_use_result\r\n def unstack(self, value, name=None):\r\n \"\"\"Unstack the values of a `Tensor` in the TensorArray.\r\n\r\n If input value shapes have rank-`R`, then the output TensorArray will\r\n contain elements whose shapes are rank-`(R-1)`.\r\n\r\n Args:\r\n value: (N+1)-D. Tensor of type `dtype`. The Tensor to unstack.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A new TensorArray object with flow that ensures the unstack occurs.\r\n Use this object all for subsequent operations.\r\n\r\n Raises:\r\n ValueError: if the shape inference fails.\r\n \"\"\"\r\n return self._implementation.unstack(value, name=name)\r\n\r\n @tf_should_use.should_use_result\r\n def scatter(self, indices, value, name=None):\r\n \"\"\"Scatter the values of a `Tensor` in specific indices of a `TensorArray`.\r\n\r\n Args:\r\n indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If\r\n the `TensorArray` is not dynamic, `max_value=size()`.\r\n value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A new TensorArray object with flow that ensures the scatter occurs.\r\n Use this object all for subsequent operations.\r\n\r\n Raises:\r\n ValueError: if the shape inference fails.\r\n \"\"\"\r\n return self._implementation.scatter(indices, value, name=name)\r\n\r\n @tf_should_use.should_use_result\r\n def split(self, value, lengths, name=None):\r\n \"\"\"Split the values of a `Tensor` into the TensorArray.\r\n\r\n Args:\r\n value: (N+1)-D. Tensor of type `dtype`. The Tensor to split.\r\n lengths: 1-D. 
int32 vector with the lengths to use when splitting\r\n `value` along its first dimension.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A new TensorArray object with flow that ensures the split occurs.\r\n Use this object all for subsequent operations.\r\n\r\n Raises:\r\n ValueError: if the shape inference fails.\r\n \"\"\"\r\n return self._implementation.split(value, lengths, name=name)\r\n\r\n def size(self, name=None):\r\n \"\"\"Return the size of the TensorArray.\"\"\"\r\n return self._implementation.size(name=name)\r\n\r\n @tf_should_use.should_use_result\r\n def close(self, name=None):\r\n \"\"\"Close the current TensorArray.\"\"\"\r\n return self._implementation.close(name=name)\r\n\r\n\r\ndef build_ta_with_new_flow(old_ta, flow):\r\n \"\"\"Builds a TensorArray with a new `flow` tensor.\"\"\"\r\n # Sometimes we get old_ta as the implementation, sometimes it's the\r\n # TensorArray wrapper object.\r\n impl = (old_ta._implementation if isinstance(old_ta, TensorArray)\r\n else old_ta)\r\n\r\n if not context.executing_eagerly():\r\n if (not isinstance(impl, _GraphTensorArrayV2) and\r\n control_flow_util.EnableControlFlowV2(ops.get_default_graph())):\r\n raise NotImplementedError(\"Attempting to build a graph-mode TF2-style \"\r\n \"TensorArray from either an eager-mode \"\r\n \"TensorArray or a TF1-style TensorArray. \"\r\n \"This is not currently supported. You may be \"\r\n \"attempting to capture a TensorArray \"\r\n \"inside a tf.function or tf.data map function. \"\r\n \"Instead, construct a new TensorArray inside \"\r\n \"the function.\")\r\n new_ta = TensorArray(\r\n dtype=impl.dtype,\r\n handle=impl.handle,\r\n flow=flow,\r\n infer_shape=impl._infer_shape,\r\n colocate_with_first_write_call=impl._colocate_with_first_write_call)\r\n new_impl = new_ta._implementation\r\n new_impl._dynamic_size = impl._dynamic_size\r\n new_impl._colocate_with = impl._colocate_with\r\n new_impl._element_shape = impl._element_shape # Share _element_shape.\r\n return new_ta\r\n\r\n# pylint: enable=protected-access\r\n\r\n\r\ndef _check_dtypes(value, dtype):\r\n if value.dtype != dtype:\r\n logging.error(\r\n \"Error: Input value {} has dtype {}, but expected dtype {}. \"\r\n \"This leads to undefined behavior and will be an error \"\r\n \"in future versions of TensorFlow. 
Traceback:\\n{}\".format(\r\n value, str(value.dtype), str(dtype),\r\n \"\".join(traceback.format_stack())))\r\n\r\n\r\n@tf_export(\"TensorArraySpec\")\r\nclass TensorArraySpec(type_spec.TypeSpec):\r\n \"\"\"Type specification for a `tf.TensorArray`.\"\"\"\r\n\r\n __slots__ = [\"_element_shape\", \"_dtype\", \"_dynamic_size\", \"_infer_shape\"]\r\n\r\n value_type = property(lambda self: TensorArray)\r\n\r\n def __init__(self, element_shape=None, dtype=dtypes.float32,\r\n dynamic_size=False, infer_shape=True):\r\n \"\"\"Constructs a type specification for a `tf.TensorArray`.\r\n\r\n Args:\r\n element_shape: The shape of each element in the `TensorArray`.\r\n dtype: Data type of the `TensorArray`.\r\n dynamic_size: Whether the `TensorArray` can grow past its initial size.\r\n infer_shape: Whether shape inference is enabled.\r\n \"\"\"\r\n self._element_shape = tensor_shape.as_shape(element_shape)\r\n self._dtype = dtypes.as_dtype(dtype)\r\n self._dynamic_size = dynamic_size\r\n self._infer_shape = infer_shape\r\n\r\n def is_compatible_with(self, other):\r\n # pylint: disable=protected-access\r\n if not isinstance(other, type_spec.TypeSpec):\r\n other = type_spec.type_spec_from_value(other)\r\n\r\n # Note: we intentionally exclude infer_shape in this check.\r\n return (isinstance(other, TensorArraySpec) and\r\n self._dtype.is_compatible_with(other._dtype) and\r\n self._element_shape.is_compatible_with(other._element_shape) and\r\n self._dynamic_size == other._dynamic_size)\r\n\r\n def most_specific_compatible_type(self, other):\r\n # pylint: disable=protected-access\r\n if not self.is_compatible_with(other):\r\n raise ValueError(\"Types are not compatible\")\r\n infer_shape = self._infer_shape and other._infer_shape\r\n return TensorArraySpec(\r\n self._element_shape.most_specific_compatible_shape(\r\n other._element_shape),\r\n self._dtype, self._dynamic_size, infer_shape)\r\n\r\n def _serialize(self):\r\n return (self._element_shape, self._dtype, self._dynamic_size,\r\n self._infer_shape)\r\n\r\n @property\r\n def _component_specs(self):\r\n return [tensor_spec.TensorSpec([], dtypes.variant)]\r\n\r\n def _to_components(self, value):\r\n if not isinstance(value, TensorArray):\r\n raise TypeError(\"value must be a TensorArray, but saw: {}\"\r\n .format(type(value)))\r\n if value.flow is not None and value.flow.dtype == dtypes.variant:\r\n return [value.flow]\r\n else:\r\n # Convert to a TF2-style TensorArray.\r\n # TODO(ebrevdo): Add an \"_as_variant\" method to TensorArray class, or\r\n # \"implementation / as_variant\" arg to TensorArray constructor.\r\n with ops.name_scope(\"convert_tensor_array\"):\r\n flow = list_ops.tensor_list_from_tensor(\r\n tensor=value.stack(), element_shape=value.element_shape)\r\n return [flow]\r\n\r\n def _from_components(self, tensor_list):\r\n # This will return a TF2 Graph-style TensorArray because tensor_list[0] is\r\n # a variant object. 
size == -1 implies unknown size.\r\n ret = TensorArray(\r\n dtype=self._dtype,\r\n flow=tensor_list[0],\r\n dynamic_size=self._dynamic_size,\r\n infer_shape=self._infer_shape)\r\n ret._implementation._element_shape = [self._element_shape] # pylint: disable=protected-access\r\n return ret\r\n\r\n @staticmethod\r\n def from_value(value):\r\n if not isinstance(value, TensorArray):\r\n raise TypeError(\"Expected value to be a TensorArray, but saw: {}\".\r\n format(type(value)))\r\n\r\n return TensorArraySpec(\r\n dtype=value.dtype,\r\n element_shape=value.element_shape,\r\n dynamic_size=value.dynamic_size,\r\n infer_shape=value._infer_shape) # pylint: disable=protected-access\r\n\r\n def _to_legacy_output_types(self):\r\n return self._dtype\r\n\r\n def _to_legacy_output_shapes(self):\r\n # Sneak the dynamic_size and infer_shape values into the legacy shape.\r\n return (tensor_shape.TensorShape([self._dynamic_size, self._infer_shape\r\n ]).concatenate(self._element_shape))\r\n\r\n def _to_legacy_output_classes(self):\r\n return TensorArray\r\n\r\n\r\n# Register the TypeSpec for TensorArray. If TensorArray is updated to be a\r\n# CompositeTensor, then this registration can be deleted.\r\ntype_spec.register_type_spec_from_value_converter(\r\n TensorArray, TensorArraySpec.from_value, allow_subclass=True)\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Monte Carlo integration and helpers.\r\n\r\nUse [tfp.monte_carlo](/probability/api_docs/python/tfp/monte_carlo) instead.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n# go/tf-wildcard-import\r\n# pylint: disable=wildcard-import\r\nfrom tensorflow.contrib.bayesflow.python.ops.monte_carlo_impl import *\r\n# pylint: enable=wildcard-import\r\nfrom tensorflow.python.util.all_util import remove_undocumented\r\n\r\n_allowed_symbols = [\r\n 'expectation',\r\n 'expectation_importance_sampler',\r\n 'expectation_importance_sampler_logspace',\r\n]\r\n\r\nremove_undocumented(__name__, _allowed_symbols)\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Bijector Ops.\r\n\r\nUse [tfp.bijectors](/probability/api_docs/python/tfp/bijectors) instead.\r\n\r\n@@AbsoluteValue\r\n@@Affine\r\n@@AffineLinearOperator\r\n@@AffineScalar\r\n@@Bijector\r\n@@BatchNormalization\r\n@@Chain\r\n@@CholeskyOuterProduct\r\n@@ConditionalBijector\r\n@@Exp\r\n@@FillTriangular\r\n@@Gumbel\r\n@@Identity\r\n@@Inline\r\n@@Invert\r\n@@Kumaraswamy\r\n@@MaskedAutoregressiveFlow\r\n@@MatrixInverseTriL\r\n@@Ordered\r\n@@Permute\r\n@@PowerTransform\r\n@@RealNVP\r\n@@Reshape\r\n@@ScaleTriL\r\n@@Sigmoid\r\n@@SinhArcsinh\r\n@@SoftmaxCentered\r\n@@Softplus\r\n@@Softsign\r\n@@Square\r\n@@TransformDiagonal\r\n@@Weibull\r\n\r\n@@masked_autoregressive_default_template\r\n@@masked_dense\r\n@@real_nvp_default_template\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member\r\n\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.absolute_value import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.affine import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.batch_normalization import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.chain import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.cholesky_outer_product import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.exp import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.fill_triangular import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.gumbel import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.inline import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.invert import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.kumaraswamy import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.matrix_inverse_tril import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.ordered import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.permute import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.power_transform import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.real_nvp import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.reshape import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.scale_tril import *\r\nfrom 
tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.sinh_arcsinh import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.softplus import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.softsign import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.square import *\r\nfrom tensorflow.contrib.distributions.python.ops.bijectors.transform_diagonal import *\r\nfrom tensorflow.python.ops.distributions.bijector import *\r\nfrom tensorflow.python.ops.distributions.identity_bijector import Identity\r\n\r\n# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member\r\n\r\nfrom tensorflow.python.util.all_util import remove_undocumented\r\n\r\nremove_undocumented(__name__)\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for `tf.data.Dataset.shard()`.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.data.kernel_tests import test_base\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\n@test_util.run_v1_only(\"deprecated API, no eager or V2 test coverage\")\r\nclass ShardTest(test_base.DatasetTestBase):\r\n\r\n def testSimpleCase(self):\r\n dataset = dataset_ops.Dataset.range(10).shard(5, 2)\r\n self.assertDatasetProduces(dataset, expected_output=[2, 7])\r\n\r\n def testNestedData(self):\r\n dataset_a = dataset_ops.Dataset.range(10)\r\n dataset_b = dataset_ops.Dataset.range(10, 0, -1)\r\n dataset = dataset_ops.Dataset.zip((dataset_a, dataset_b)).shard(5, 2)\r\n self.assertDatasetProduces(dataset, expected_output=[(2, 8), (7, 3)])\r\n\r\n def testOffsetZero(self):\r\n dataset = dataset_ops.Dataset.range(10).shard(5, 0)\r\n self.assertDatasetProduces(dataset, expected_output=[0, 5])\r\n\r\n def testOffsetGreaterNumShards(self):\r\n with self.assertRaises(errors.InvalidArgumentError):\r\n dataset = dataset_ops.Dataset.range(10).shard(5, 7)\r\n self.evaluate(self.getNext(dataset)())\r\n\r\n def testNegativeOffset(self):\r\n with self.assertRaises(errors.InvalidArgumentError):\r\n dataset = dataset_ops.Dataset.range(10).shard(5, -3)\r\n self.evaluate(self.getNext(dataset)())\r\n\r\n def testNegativeNumShards(self):\r\n with self.assertRaises(errors.InvalidArgumentError):\r\n dataset = dataset_ops.Dataset.range(10).shard(-3, 1)\r\n self.evaluate(self.getNext(dataset)())\r\n\r\n def testZeroNumShards(self):\r\n with self.assertRaises(errors.InvalidArgumentError):\r\n dataset = dataset_ops.Dataset.range(10).shard(0, 1)\r\n 
self.evaluate(self.getNext(dataset)())\r\n\r\n def testIteratorEndsBeforeFirstElem(self):\r\n dataset = dataset_ops.Dataset.range(1).shard(5, 2)\r\n self.assertDatasetProduces(dataset, expected_output=[])\r\n\r\n def testLargerWorkerPool(self):\r\n dataset = dataset_ops.Dataset.range(10).shard(7, 5)\r\n self.assertDatasetProduces(dataset, expected_output=[5])\r\n\r\n def testIndexEqualsNumShards(self):\r\n dataset = dataset_ops.Dataset.range(10).shard(5, 4)\r\n self.assertDatasetProduces(dataset, expected_output=[4, 9])\r\n\r\n def testIndexEqualsNumShards2(self):\r\n dataset = dataset_ops.Dataset.range(10).shard(4, 3)\r\n self.assertDatasetProduces(dataset, expected_output=[3, 7])\r\n\r\n def testNumShardsLargerThanDataset(self):\r\n dataset = dataset_ops.Dataset.range(10).shard(20, 5)\r\n self.assertDatasetProduces(dataset, expected_output=[5])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Utilities for multi-gpu training.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.keras import backend as K\r\nfrom tensorflow.python.keras.engine.training import Model\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.util import deprecation\r\nfrom tensorflow.python.util.tf_export import keras_export\r\n\r\n\r\ndef _get_available_devices():\r\n return [x.name for x in K.get_session().list_devices()]\r\n\r\n\r\ndef _normalize_device_name(name):\r\n name = '/' + name.lower().split('device:')[1]\r\n return name\r\n\r\n\r\n@keras_export('keras.utils.multi_gpu_model')\r\[email protected](\r\n '2020-04-01', 'Use `tf.distribute.MirroredStrategy` instead.')\r\ndef multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):\r\n \"\"\"Replicates a model on different GPUs.\r\n\r\n Specifically, this function implements single-machine\r\n multi-GPU data parallelism. It works in the following way:\r\n\r\n - Divide the model's input(s) into multiple sub-batches.\r\n - Apply a model copy on each sub-batch. Every model copy\r\n is executed on a dedicated GPU.\r\n - Concatenate the results (on CPU) into one big batch.\r\n\r\n E.g. if your `batch_size` is 64 and you use `gpus=2`,\r\n then we will divide the input into 2 sub-batches of 32 samples,\r\n process each sub-batch on one GPU, then return the full\r\n batch of 64 processed samples.\r\n\r\n This induces quasi-linear speedup on up to 8 GPUs.\r\n\r\n This function is only available with the TensorFlow backend\r\n for the time being.\r\n\r\n Arguments:\r\n model: A Keras model instance. 
To avoid OOM errors,\r\n this model could have been built on CPU, for instance\r\n (see usage example below).\r\n gpus: Integer >= 2, number of on GPUs on which to create\r\n model replicas.\r\n cpu_merge: A boolean value to identify whether to force\r\n merging model weights under the scope of the CPU or not.\r\n cpu_relocation: A boolean value to identify whether to\r\n create the model's weights under the scope of the CPU.\r\n If the model is not defined under any preceding device\r\n scope, you can still rescue it by activating this option.\r\n\r\n Returns:\r\n A Keras `Model` instance which can be used just like the initial\r\n `model` argument, but which distributes its workload on multiple GPUs.\r\n\r\n Example 1: Training models with weights merge on CPU\r\n\r\n ```python\r\n import tensorflow as tf\r\n from keras.applications import Xception\r\n from keras.utils import multi_gpu_model\r\n import numpy as np\r\n\r\n num_samples = 1000\r\n height = 224\r\n width = 224\r\n num_classes = 1000\r\n\r\n # Instantiate the base model (or \"template\" model).\r\n # We recommend doing this with under a CPU device scope,\r\n # so that the model's weights are hosted on CPU memory.\r\n # Otherwise they may end up hosted on a GPU, which would\r\n # complicate weight sharing.\r\n with tf.device('/cpu:0'):\r\n model = Xception(weights=None,\r\n input_shape=(height, width, 3),\r\n classes=num_classes)\r\n\r\n # Replicates the model on 8 GPUs.\r\n # This assumes that your machine has 8 available GPUs.\r\n parallel_model = multi_gpu_model(model, gpus=8)\r\n parallel_model.compile(loss='categorical_crossentropy',\r\n optimizer='rmsprop')\r\n\r\n # Generate dummy data.\r\n x = np.random.random((num_samples, height, width, 3))\r\n y = np.random.random((num_samples, num_classes))\r\n\r\n # This `fit` call will be distributed on 8 GPUs.\r\n # Since the batch size is 256, each GPU will process 32 samples.\r\n parallel_model.fit(x, y, epochs=20, batch_size=256)\r\n\r\n # Save model via the template model (which shares the same weights):\r\n model.save('my_model.h5')\r\n ```\r\n\r\n Example 2: Training models with weights merge on CPU using cpu_relocation\r\n\r\n ```python\r\n ..\r\n # Not needed to change the device scope for model definition:\r\n model = Xception(weights=None, ..)\r\n\r\n try:\r\n model = multi_gpu_model(model, cpu_relocation=True)\r\n print(\"Training using multiple GPUs..\")\r\n except:\r\n print(\"Training using single GPU or CPU..\")\r\n\r\n model.compile(..)\r\n ..\r\n ```\r\n\r\n Example 3: Training models with weights merge on GPU (recommended for NV-link)\r\n\r\n ```python\r\n ..\r\n # Not needed to change the device scope for model definition:\r\n model = Xception(weights=None, ..)\r\n\r\n try:\r\n model = multi_gpu_model(model, cpu_merge=False)\r\n print(\"Training using multiple GPUs..\")\r\n except:\r\n print(\"Training using single GPU or CPU..\")\r\n model.compile(..)\r\n ..\r\n ```\r\n\r\n Raises:\r\n ValueError: if the `gpus` argument does not match available devices.\r\n \"\"\"\r\n # pylint: disable=g-import-not-at-top\r\n from tensorflow.python.keras.layers.core import Lambda\r\n from tensorflow.python.keras.layers.merge import concatenate\r\n\r\n if isinstance(gpus, (list, tuple)):\r\n if len(gpus) <= 1:\r\n raise ValueError('For multi-gpu usage to be effective, '\r\n 'call `multi_gpu_model` with `len(gpus) >= 2`. 
'\r\n 'Received: `gpus=%s`' % gpus)\r\n num_gpus = len(gpus)\r\n target_gpu_ids = gpus\r\n else:\r\n if gpus <= 1:\r\n raise ValueError('For multi-gpu usage to be effective, '\r\n 'call `multi_gpu_model` with `gpus >= 2`. '\r\n 'Received: `gpus=%s`' % gpus)\r\n num_gpus = gpus\r\n target_gpu_ids = range(num_gpus)\r\n\r\n target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in target_gpu_ids]\r\n available_devices = _get_available_devices()\r\n available_devices = [\r\n _normalize_device_name(name) for name in available_devices\r\n ]\r\n for device in target_devices:\r\n if device not in available_devices:\r\n raise ValueError('To call `multi_gpu_model` with `gpus=%s`, '\r\n 'we expect the following devices to be available: %s. '\r\n 'However this machine only has: %s. '\r\n 'Try reducing `gpus`.' % (gpus, target_devices,\r\n available_devices))\r\n\r\n def get_slice(data, i, parts):\r\n \"\"\"Slice an array into `parts` slices and return slice `i`.\r\n\r\n Arguments:\r\n data: array to slice.\r\n i: index of slice to return.\r\n parts: number of slices to make.\r\n\r\n Returns:\r\n Slice `i` of `data`.\r\n \"\"\"\r\n shape = array_ops.shape(data)\r\n batch_size = shape[:1]\r\n input_shape = shape[1:]\r\n step = batch_size // parts\r\n if i == parts - 1:\r\n size = batch_size - step * i\r\n else:\r\n size = step\r\n size = array_ops.concat([size, input_shape], axis=0)\r\n stride = array_ops.concat([step, input_shape * 0], axis=0)\r\n start = stride * i\r\n return array_ops.slice(data, start, size)\r\n\r\n # Relocate the model definition under CPU device scope if needed\r\n if cpu_relocation:\r\n from tensorflow.python.keras.models import clone_model # pylint: disable=g-import-not-at-top\r\n with ops.device('/cpu:0'):\r\n model = clone_model(model)\r\n\r\n all_outputs = [[] for _ in range(len(model.outputs))]\r\n\r\n # Place a copy of the model on each GPU,\r\n # each getting a slice of the inputs.\r\n for i, gpu_id in enumerate(target_gpu_ids):\r\n with ops.device('/gpu:%d' % gpu_id):\r\n with K.name_scope('replica_%d' % gpu_id):\r\n inputs = []\r\n # Retrieve a slice of the input.\r\n for x in model.inputs:\r\n input_shape = tuple(x.shape.as_list())[1:]\r\n slice_i = Lambda(\r\n get_slice,\r\n output_shape=input_shape,\r\n arguments={\r\n 'i': i,\r\n 'parts': num_gpus\r\n })(\r\n x)\r\n inputs.append(slice_i)\r\n\r\n # Apply model on slice\r\n # (creating a model replica on the target device).\r\n outputs = model(inputs)\r\n if not isinstance(outputs, list):\r\n outputs = [outputs]\r\n\r\n # Save the outputs for merging back together later.\r\n for o, output in enumerate(outputs):\r\n all_outputs[o].append(output)\r\n\r\n # Deduplicate output names to handle Siamese networks.\r\n occurrences = {}\r\n for n in model.output_names:\r\n if n not in occurrences:\r\n occurrences[n] = 1\r\n else:\r\n occurrences[n] += 1\r\n conflict_counter = {n: 0 for n, count in occurrences.items() if count > 1}\r\n output_names = []\r\n for n in model.output_names:\r\n if n in conflict_counter:\r\n conflict_counter[n] += 1\r\n n += '_%d' % conflict_counter[n]\r\n output_names.append(n)\r\n\r\n # Merge outputs under expected scope.\r\n with ops.device('/cpu:0' if cpu_merge else '/gpu:%d' % target_gpu_ids[0]):\r\n merged = []\r\n for name, outputs in zip(output_names, all_outputs):\r\n merged.append(concatenate(outputs, axis=0, name=name))\r\n return Model(model.inputs, merged)\r\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import linalg_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import variables as variables_module\r\nfrom tensorflow.python.ops.linalg import linalg as linalg_lib\r\nfrom tensorflow.python.ops.linalg import linear_operator_householder as householder\r\nfrom tensorflow.python.ops.linalg import linear_operator_test_util\r\nfrom tensorflow.python.platform import test\r\n\r\nlinalg = linalg_lib\r\nCheckTapeSafeSkipOptions = linear_operator_test_util.CheckTapeSafeSkipOptions\r\n\r\n\r\n@test_util.run_all_in_graph_and_eager_modes\r\nclass LinearOperatorHouseholderTest(\r\n linear_operator_test_util.SquareLinearOperatorDerivedClassTest):\r\n \"\"\"Most tests done in the base class LinearOperatorDerivedClassTest.\"\"\"\r\n\r\n @staticmethod\r\n def operator_shapes_infos():\r\n shape_info = linear_operator_test_util.OperatorShapesInfo\r\n return [\r\n shape_info((1, 1)),\r\n shape_info((1, 3, 3)),\r\n shape_info((3, 4, 4)),\r\n shape_info((2, 1, 4, 4))]\r\n\r\n @staticmethod\r\n def skip_these_tests():\r\n # This linear operator is never positive definite.\r\n return [\"cholesky\"]\r\n\r\n def operator_and_matrix(\r\n self, build_info, dtype, use_placeholder,\r\n ensure_self_adjoint_and_pd=False):\r\n shape = list(build_info.shape)\r\n reflection_axis = linear_operator_test_util.random_sign_uniform(\r\n shape[:-1], minval=1., maxval=2., dtype=dtype)\r\n # Make sure unit norm.\r\n reflection_axis = reflection_axis / linalg_ops.norm(\r\n reflection_axis, axis=-1, keepdims=True)\r\n\r\n lin_op_reflection_axis = reflection_axis\r\n\r\n if use_placeholder:\r\n lin_op_reflection_axis = array_ops.placeholder_with_default(\r\n reflection_axis, shape=None)\r\n\r\n operator = householder.LinearOperatorHouseholder(lin_op_reflection_axis)\r\n\r\n mat = reflection_axis[..., array_ops.newaxis]\r\n matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)\r\n matrix = array_ops.matrix_set_diag(\r\n matrix, 1. 
+ array_ops.matrix_diag_part(matrix))\r\n\r\n return operator, matrix\r\n\r\n def test_scalar_reflection_axis_raises(self):\r\n with self.assertRaisesRegexp(ValueError, \"must have at least 1 dimension\"):\r\n householder.LinearOperatorHouseholder(1.)\r\n\r\n def test_householder_adjoint_type(self):\r\n reflection_axis = [1., 3., 5., 8.]\r\n operator = householder.LinearOperatorHouseholder(reflection_axis)\r\n self.assertIsInstance(\r\n operator.adjoint(), householder.LinearOperatorHouseholder)\r\n\r\n def test_householder_inverse_type(self):\r\n reflection_axis = [1., 3., 5., 8.]\r\n operator = householder.LinearOperatorHouseholder(reflection_axis)\r\n self.assertIsInstance(\r\n operator.inverse(), householder.LinearOperatorHouseholder)\r\n\r\n def test_tape_safe(self):\r\n reflection_axis = variables_module.Variable([1., 3., 5., 8.])\r\n operator = householder.LinearOperatorHouseholder(reflection_axis)\r\n self.check_tape_safe(\r\n operator,\r\n skip_options=[\r\n # Determinant hard-coded as 1.\r\n CheckTapeSafeSkipOptions.DETERMINANT,\r\n CheckTapeSafeSkipOptions.LOG_ABS_DETERMINANT,\r\n # Trace hard-coded.\r\n CheckTapeSafeSkipOptions.TRACE,\r\n ])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n linear_operator_test_util.add_tests(LinearOperatorHouseholderTest)\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for the IgnoreErrors input pipeline ops.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base\r\nfrom tensorflow.python.data.experimental.ops import error_ops\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\nclass IgnoreErrorsSerializationTest(\r\n dataset_serialization_test_base.DatasetSerializationTestBase):\r\n\r\n def _build_ds(self):\r\n return dataset_ops.Dataset.range(5).map(\r\n array_ops.ones).map(lambda x: array_ops.gather(x, [0])).apply(\r\n error_ops.ignore_errors())\r\n\r\n def testIgnoreErrorsCore(self):\r\n num_outputs = 4\r\n self.run_core_tests(self._build_ds, num_outputs)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Confusion matrix related metrics.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.ops import confusion_matrix as cm\r\n\r\n\r\ndef confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,\r\n name=None, weights=None):\r\n \"\"\"Deprecated. Use tf.math.confusion_matrix instead.\"\"\"\r\n return cm.confusion_matrix(labels=labels, predictions=predictions,\r\n num_classes=num_classes, dtype=dtype, name=name,\r\n weights=weights)\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Implementation of ClusterResolvers for GCE instance groups.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver\r\nfrom tensorflow.python.training.server_lib import ClusterSpec\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n_GOOGLE_API_CLIENT_INSTALLED = True\r\ntry:\r\n from googleapiclient import discovery # pylint: disable=g-import-not-at-top\r\n from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top\r\nexcept ImportError:\r\n _GOOGLE_API_CLIENT_INSTALLED = False\r\n\r\n\r\n@tf_export('distribute.cluster_resolver.GCEClusterResolver')\r\nclass GCEClusterResolver(ClusterResolver):\r\n \"\"\"ClusterResolver for Google Compute Engine.\r\n\r\n This is an implementation of cluster resolvers for the Google Compute Engine\r\n instance group platform. 
By specifying a project, zone, and instance group,\r\n this will retrieve the IP address of all the instances within the instance\r\n group and return a ClusterResolver object suitable for use for distributed\r\n TensorFlow.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n project,\r\n zone,\r\n instance_group,\r\n port,\r\n task_type='worker',\r\n task_id=0,\r\n rpc_layer='grpc',\r\n credentials='default',\r\n service=None):\r\n \"\"\"Creates a new GCEClusterResolver object.\r\n\r\n This takes in a few parameters and creates a GCEClusterResolver project. It\r\n will then use these parameters to query the GCE API for the IP addresses of\r\n each instance in the instance group.\r\n\r\n Args:\r\n project: Name of the GCE project.\r\n zone: Zone of the GCE instance group.\r\n instance_group: Name of the GCE instance group.\r\n port: Port of the listening TensorFlow server (default: 8470)\r\n task_type: Name of the TensorFlow job this GCE instance group of VM\r\n instances belong to.\r\n task_id: The task index for this particular VM, within the GCE\r\n instance group. In particular, every single instance should be assigned\r\n a unique ordinal index within an instance group manually so that they\r\n can be distinguished from each other.\r\n rpc_layer: The RPC layer TensorFlow should use to communicate across\r\n instances.\r\n credentials: GCE Credentials. If nothing is specified, this defaults to\r\n GoogleCredentials.get_application_default().\r\n service: The GCE API object returned by the googleapiclient.discovery\r\n function. (Default: discovery.build('compute', 'v1')). If you specify a\r\n custom service object, then the credentials parameter will be ignored.\r\n\r\n Raises:\r\n ImportError: If the googleapiclient is not installed.\r\n \"\"\"\r\n self._project = project\r\n self._zone = zone\r\n self._instance_group = instance_group\r\n self._task_type = task_type\r\n self._task_id = task_id\r\n self._rpc_layer = rpc_layer\r\n self._port = port\r\n self._credentials = credentials\r\n\r\n if credentials == 'default':\r\n if _GOOGLE_API_CLIENT_INSTALLED:\r\n self._credentials = GoogleCredentials.get_application_default()\r\n\r\n if service is None:\r\n if not _GOOGLE_API_CLIENT_INSTALLED:\r\n raise ImportError('googleapiclient must be installed before using the '\r\n 'GCE cluster resolver')\r\n self._service = discovery.build(\r\n 'compute', 'v1',\r\n credentials=self._credentials)\r\n else:\r\n self._service = service\r\n\r\n def cluster_spec(self):\r\n \"\"\"Returns a ClusterSpec object based on the latest instance group info.\r\n\r\n This returns a ClusterSpec object for use based on information from the\r\n specified instance group. 
We will retrieve the information from the GCE APIs\r\n every time this method is called.\r\n\r\n Returns:\r\n A ClusterSpec containing host information retrieved from GCE.\r\n \"\"\"\r\n request_body = {'instanceState': 'RUNNING'}\r\n request = self._service.instanceGroups().listInstances(\r\n project=self._project,\r\n zone=self._zone,\r\n instanceGroups=self._instance_group,\r\n body=request_body,\r\n orderBy='name')\r\n\r\n worker_list = []\r\n\r\n while request is not None:\r\n response = request.execute()\r\n\r\n items = response['items']\r\n for instance in items:\r\n instance_name = instance['instance'].split('/')[-1]\r\n\r\n instance_request = self._service.instances().get(\r\n project=self._project,\r\n zone=self._zone,\r\n instance=instance_name)\r\n\r\n if instance_request is not None:\r\n instance_details = instance_request.execute()\r\n ip_address = instance_details['networkInterfaces'][0]['networkIP']\r\n instance_url = '%s:%s' % (ip_address, self._port)\r\n worker_list.append(instance_url)\r\n\r\n request = self._service.instanceGroups().listInstances_next(\r\n previous_request=request,\r\n previous_response=response)\r\n\r\n worker_list.sort()\r\n return ClusterSpec({self._task_type: worker_list})\r\n\r\n def master(self, task_type=None, task_id=None, rpc_layer=None):\r\n task_type = task_type if task_type is not None else self._task_type\r\n task_id = task_id if task_id is not None else self._task_id\r\n\r\n if task_type is not None and task_id is not None:\r\n master = self.cluster_spec().task_address(task_type, task_id)\r\n if rpc_layer or self._rpc_layer:\r\n return '%s://%s' % (rpc_layer or self._rpc_layer, master)\r\n else:\r\n return master\r\n\r\n return ''\r\n\r\n @property\r\n def task_type(self):\r\n return self._task_type\r\n\r\n @property\r\n def task_id(self):\r\n return self._task_id\r\n\r\n @task_type.setter\r\n def task_type(self, task_type):\r\n raise RuntimeError(\r\n 'You cannot reset the task_type of the GCEClusterResolver after it has '\r\n 'been created.')\r\n\r\n @task_id.setter\r\n def task_id(self, task_id):\r\n self._task_id = task_id\r\n\r\n @property\r\n def rpc_layer(self):\r\n return self._rpc_layer\r\n\r\n @rpc_layer.setter\r\n def rpc_layer(self, rpc_layer):\r\n self._rpc_layer = rpc_layer\r\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"A simple functional keras model with one layer.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.python import keras\r\nfrom tensorflow.python.distribute.model_collection import model_collection_base\r\nfrom tensorflow.python.eager import def_function\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.keras.optimizer_v2 import gradient_descent\r\nfrom tensorflow.python.module import module\r\nfrom tensorflow.python.ops import variables\r\n\r\n_BATCH_SIZE = 10\r\n\r\n\r\ndef _get_data_for_simple_models():\r\n x_train = constant_op.constant(np.random.rand(1000, 3), dtype=dtypes.float32)\r\n y_train = constant_op.constant(np.random.rand(1000, 5), dtype=dtypes.float32)\r\n x_predict = constant_op.constant(\r\n np.random.rand(1000, 3), dtype=dtypes.float32)\r\n\r\n return x_train, y_train, x_predict\r\n\r\n\r\nclass SimpleFunctionalModel(model_collection_base.ModelAndInput):\r\n \"\"\"A simple functinal model and its inputs.\"\"\"\r\n\r\n def get_model(self, **kwargs):\r\n output_name = 'output_layer'\r\n\r\n x = keras.layers.Input(shape=(3,), dtype=dtypes.float32)\r\n y = keras.layers.Dense(5, dtype=dtypes.float32, name=output_name)(x)\r\n\r\n model = keras.Model(inputs=x, outputs=y)\r\n optimizer = gradient_descent.SGD(learning_rate=0.001)\r\n experimental_run_tf_function = kwargs.pop('experimental_run_tf_function',\r\n None)\r\n assert experimental_run_tf_function is not None\r\n model.compile(\r\n loss='mse',\r\n metrics=['mae'],\r\n optimizer=optimizer,\r\n experimental_run_tf_function=experimental_run_tf_function)\r\n\r\n return model, output_name\r\n\r\n def get_data(self):\r\n return _get_data_for_simple_models()\r\n\r\n def get_batch_size(self):\r\n return _BATCH_SIZE\r\n\r\n\r\nclass SimpleSequentialModel(model_collection_base.ModelAndInput):\r\n \"\"\"A simple sequential model and its inputs.\"\"\"\r\n\r\n def get_model(self, **kwargs):\r\n output_name = 'output_layer'\r\n\r\n model = keras.Sequential()\r\n y = keras.layers.Dense(\r\n 5, dtype=dtypes.float32, name=output_name, input_dim=3)\r\n model.add(y)\r\n optimizer = gradient_descent.SGD(learning_rate=0.001)\r\n experimental_run_tf_function = kwargs.pop('experimental_run_tf_function',\r\n None)\r\n assert experimental_run_tf_function is not None\r\n model.compile(\r\n loss='mse',\r\n metrics=['mae'],\r\n optimizer=optimizer,\r\n experimental_run_tf_function=experimental_run_tf_function)\r\n\r\n return model, output_name\r\n\r\n def get_data(self):\r\n return _get_data_for_simple_models()\r\n\r\n def get_batch_size(self):\r\n return _BATCH_SIZE\r\n\r\n\r\nclass _SimpleModel(keras.Model):\r\n\r\n output_name = 'output_layer'\r\n\r\n def 
__init__(self):\r\n self._dense_layer = keras.layers.Dense(\r\n 5, dtype=dtypes.float32, name=self.output_name)\r\n\r\n def call(self, inputs):\r\n return self._dense_layer(inputs)\r\n\r\n\r\nclass SimpleSubclassModel(model_collection_base.ModelAndInput):\r\n \"\"\"A simple subclass model and its data.\"\"\"\r\n\r\n def get_model(self, **kwargs):\r\n model = _SimpleModel()\r\n optimizer = gradient_descent.SGD(learning_rate=0.001)\r\n experimental_run_tf_function = kwargs.pop('experimental_run_tf_function',\r\n None)\r\n assert experimental_run_tf_function is not None\r\n model.compile(\r\n loss='mse',\r\n metrics=['mae'],\r\n cloning=False,\r\n optimizer=optimizer,\r\n experimental_run_tf_function=experimental_run_tf_function)\r\n\r\n return model, model.output_name\r\n\r\n def get_data(self):\r\n return _get_data_for_simple_models()\r\n\r\n def get_batch_size(self):\r\n return _BATCH_SIZE\r\n\r\n\r\nclass _SimpleModule(module.Module):\r\n\r\n def __init__(self):\r\n self.v = variables.Variable(3.0)\r\n\r\n @def_function.function\r\n def __call__(self, x):\r\n return self.v * x\r\n\r\n\r\nclass SimpleTFModuleModel(model_collection_base.ModelAndInput):\r\n \"\"\"A simple model based on tf.Module and its data.\"\"\"\r\n\r\n def get_model(self, **kwargs):\r\n model = _SimpleModule()\r\n return model, 'foo'\r\n\r\n def get_data(self):\r\n return _get_data_for_simple_models()\r\n\r\n def get_batch_size(self):\r\n return _BATCH_SIZE\r\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"End-to-end benchmark for batch normalization.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nimport sys\r\nimport time\r\n\r\nfrom tensorflow.python.client import session as session_lib\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_nn_ops\r\nfrom tensorflow.python.ops import gradients_impl\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn_impl\r\nfrom tensorflow.python.ops import random_ops\r\nfrom tensorflow.python.ops import variables\r\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\ndef batch_norm_op(tensor, mean, variance, beta, gamma, scale):\r\n \"\"\"Fused kernel for batch normalization.\"\"\"\r\n # _batch_norm_with_global_normalization is deprecated in v9\r\n test_util.set_producer_version(ops.get_default_graph(), 8)\r\n # pylint: disable=protected-access\r\n return gen_nn_ops._batch_norm_with_global_normalization(\r\n tensor, mean, variance, beta, gamma, 0.001, scale)\r\n # pylint: 
enable=protected-access\r\n\r\n\r\n# Note that the naive implementation is much slower:\r\n# batch_norm = (tensor - mean) * tf.math.rsqrt(variance + 0.001)\r\n# if scale:\r\n# batch_norm *= gamma\r\n# return batch_norm + beta\r\ndef batch_norm_py(tensor, mean, variance, beta, gamma, scale):\r\n \"\"\"Python implementation of batch normalization.\"\"\"\r\n return nn_impl.batch_normalization(tensor, mean, variance, beta, gamma if\r\n scale else None, 0.001)\r\n\r\n\r\ndef batch_norm_slow(tensor, mean, variance, beta, gamma, scale):\r\n batch_norm = (tensor - mean) * math_ops.rsqrt(variance + 0.001)\r\n if scale:\r\n batch_norm *= gamma\r\n return batch_norm + beta\r\n\r\n\r\ndef build_graph(device, input_shape, axes, num_layers, mode, scale, train):\r\n \"\"\"Build a graph containing a sequence of batch normalizations.\r\n\r\n Args:\r\n device: string, the device to run on.\r\n input_shape: shape of the input tensor.\r\n axes: axes that are to be normalized across.\r\n num_layers: number of batch normalization layers in the graph.\r\n mode: \"op\", \"py\" or \"slow\" depending on the implementation.\r\n scale: scale after normalization.\r\n train: if true, also run backprop.\r\n\r\n Returns:\r\n An array of tensors to run()\r\n \"\"\"\r\n moment_shape = []\r\n keep_dims = mode == \"py\" or mode == \"slow\"\r\n if keep_dims:\r\n for axis in range(len(input_shape)):\r\n if axis in axes:\r\n moment_shape.append(1)\r\n else:\r\n moment_shape.append(input_shape[axis])\r\n else:\r\n for axis in range(len(input_shape)):\r\n if axis not in axes:\r\n moment_shape.append(input_shape[axis])\r\n with ops.device(\"/%s:0\" % device):\r\n tensor = variables.Variable(random_ops.truncated_normal(input_shape))\r\n for _ in range(num_layers):\r\n if train:\r\n mean, variance = nn_impl.moments(tensor, axes, keep_dims=keep_dims)\r\n else:\r\n mean = array_ops.zeros(moment_shape)\r\n variance = array_ops.ones(moment_shape)\r\n beta = variables.Variable(array_ops.zeros(moment_shape))\r\n gamma = variables.Variable(constant_op.constant(1.0, shape=moment_shape))\r\n if mode == \"py\":\r\n tensor = batch_norm_py(tensor, mean, variance, beta, gamma, scale)\r\n elif mode == \"op\":\r\n tensor = batch_norm_op(tensor, mean, variance, beta, gamma, scale)\r\n elif mode == \"slow\":\r\n tensor = batch_norm_slow(tensor, mean, variance, beta, gamma, scale)\r\n if train:\r\n return gradients_impl.gradients([tensor], variables.trainable_variables())\r\n else:\r\n return [tensor]\r\n\r\n\r\ndef print_difference(mode, t1, t2):\r\n \"\"\"Print the difference in timing between two runs.\"\"\"\r\n difference = (t2 - t1) / t1 * 100.0\r\n print(\"=== %s: %.1f%% ===\" % (mode, difference))\r\n\r\n\r\nclass BatchNormBenchmark(test.Benchmark):\r\n \"\"\"Benchmark batch normalization.\"\"\"\r\n\r\n def _run_graph(self, device, input_shape, axes, num_layers, mode, scale,\r\n train, num_iters):\r\n \"\"\"Run the graph and print its execution time.\r\n\r\n Args:\r\n device: string, the device to run on.\r\n input_shape: shape of the input tensor.\r\n axes: axes that are to be normalized across.\r\n num_layers: number of batch normalization layers in the graph.\r\n mode: \"op\", \"py\" or \"slow\" depending on the implementation.\r\n scale: scale after normalization.\r\n train: if true, also run backprop.\r\n num_iters: number of steps to run.\r\n\r\n Returns:\r\n The duration of the run in seconds.\r\n \"\"\"\r\n graph = ops.Graph()\r\n with graph.as_default():\r\n outputs = build_graph(device, input_shape, axes, num_layers, mode, scale,\r\n 
train)\r\n with session_lib.Session(graph=graph) as session:\r\n variables.global_variables_initializer().run()\r\n _ = session.run([out.op for out in outputs]) # warm up.\r\n start_time = time.time()\r\n for _ in range(num_iters):\r\n _ = session.run([out.op for out in outputs])\r\n duration = time.time() - start_time\r\n print(\"%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs\" %\r\n (device, len(input_shape), len(axes), num_layers, mode, scale, train,\r\n duration / num_iters))\r\n\r\n name_template = (\r\n \"batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_\"\r\n \"layers_{num_layers}_scale_{scale}_\"\r\n \"train_{train}\")\r\n\r\n self.report_benchmark(\r\n name=name_template.format(\r\n device=device,\r\n mode=mode,\r\n num_layers=num_layers,\r\n scale=scale,\r\n train=train,\r\n shape=str(input_shape).replace(\" \", \"\"),\r\n axes=str(axes)).replace(\" \", \"\"),\r\n iters=num_iters,\r\n wall_time=duration / num_iters)\r\n\r\n return duration\r\n\r\n def benchmark_batch_norm(self):\r\n print(\"Forward convolution (lower layers).\")\r\n shape = [8, 128, 128, 32]\r\n axes = [0, 1, 2]\r\n t1 = self._run_graph(\"cpu\", shape, axes, 10, \"op\", True, False, 5)\r\n t2 = self._run_graph(\"cpu\", shape, axes, 10, \"py\", True, False, 5)\r\n t3 = self._run_graph(\"cpu\", shape, axes, 10, \"slow\", True, False, 5)\r\n print_difference(\"op vs py\", t1, t2)\r\n print_difference(\"py vs slow\", t2, t3)\r\n if FLAGS.use_gpu:\r\n t1 = self._run_graph(\"gpu\", shape, axes, 10, \"op\", True, False, 50)\r\n t2 = self._run_graph(\"gpu\", shape, axes, 10, \"py\", True, False, 50)\r\n t3 = self._run_graph(\"gpu\", shape, axes, 10, \"slow\", True, False, 50)\r\n print_difference(\"op vs py\", t1, t2)\r\n print_difference(\"py vs slow\", t2, t3)\r\n print(\"Forward/backward convolution (lower layers).\")\r\n t1 = self._run_graph(\"cpu\", shape, axes, 10, \"op\", True, True, 5)\r\n t2 = self._run_graph(\"cpu\", shape, axes, 10, \"py\", True, True, 5)\r\n t3 = self._run_graph(\"cpu\", shape, axes, 10, \"slow\", True, True, 5)\r\n print_difference(\"op vs py\", t1, t2)\r\n print_difference(\"py vs slow\", t2, t3)\r\n if FLAGS.use_gpu:\r\n t1 = self._run_graph(\"gpu\", shape, axes, 10, \"op\", True, True, 50)\r\n t2 = self._run_graph(\"gpu\", shape, axes, 10, \"py\", True, True, 50)\r\n t3 = self._run_graph(\"gpu\", shape, axes, 10, \"slow\", True, True, 50)\r\n print_difference(\"op vs py\", t1, t2)\r\n print_difference(\"py vs slow\", t2, t3)\r\n print(\"Forward convolution (higher layers).\")\r\n shape = [256, 17, 17, 32]\r\n axes = [0, 1, 2]\r\n t1 = self._run_graph(\"cpu\", shape, axes, 10, \"op\", True, False, 5)\r\n t2 = self._run_graph(\"cpu\", shape, axes, 10, \"py\", True, False, 5)\r\n t3 = self._run_graph(\"cpu\", shape, axes, 10, \"slow\", True, False, 5)\r\n print_difference(\"op vs py\", t1, t2)\r\n print_difference(\"py vs slow\", t2, t3)\r\n if FLAGS.use_gpu:\r\n t1 = self._run_graph(\"gpu\", shape, axes, 10, \"op\", True, False, 50)\r\n t2 = self._run_graph(\"gpu\", shape, axes, 10, \"py\", True, False, 50)\r\n t3 = self._run_graph(\"gpu\", shape, axes, 10, \"slow\", True, False, 50)\r\n print_difference(\"op vs py\", t1, t2)\r\n print_difference(\"py vs slow\", t2, t3)\r\n print(\"Forward/backward convolution (higher layers).\")\r\n t1 = self._run_graph(\"cpu\", shape, axes, 10, \"op\", True, True, 5)\r\n t2 = self._run_graph(\"cpu\", shape, axes, 10, \"py\", True, True, 5)\r\n t3 = self._run_graph(\"cpu\", shape, axes, 10, \"slow\", True, True, 5)\r\n 
print_difference(\"op vs py\", t1, t2)\r\n print_difference(\"py vs slow\", t2, t3)\r\n if FLAGS.use_gpu:\r\n t1 = self._run_graph(\"gpu\", shape, axes, 10, \"op\", True, True, 50)\r\n t2 = self._run_graph(\"gpu\", shape, axes, 10, \"py\", True, True, 50)\r\n t3 = self._run_graph(\"gpu\", shape, axes, 10, \"slow\", True, True, 50)\r\n print_difference(\"op vs py\", t1, t2)\r\n print_difference(\"py vs slow\", t2, t3)\r\n print(\"Forward fully-connected.\")\r\n shape = [1024, 32]\r\n axes = [0]\r\n t1 = self._run_graph(\"cpu\", shape, axes, 10, \"py\", True, False, 5)\r\n t2 = self._run_graph(\"cpu\", shape, axes, 10, \"slow\", True, False, 5)\r\n print_difference(\"py vs slow\", t1, t2)\r\n if FLAGS.use_gpu:\r\n t1 = self._run_graph(\"gpu\", shape, axes, 10, \"py\", True, False, 50)\r\n t2 = self._run_graph(\"gpu\", shape, axes, 10, \"slow\", True, False, 50)\r\n print_difference(\"py vs slow\", t1, t2)\r\n print(\"Forward/backward fully-connected.\")\r\n t1 = self._run_graph(\"cpu\", shape, axes, 10, \"py\", True, True, 50)\r\n t2 = self._run_graph(\"cpu\", shape, axes, 10, \"slow\", True, True, 50)\r\n print_difference(\"py vs slow\", t1, t2)\r\n if FLAGS.use_gpu:\r\n t1 = self._run_graph(\"gpu\", shape, axes, 10, \"py\", True, True, 5)\r\n t2 = self._run_graph(\"gpu\", shape, axes, 10, \"slow\", True, True, 5)\r\n print_difference(\"py vs slow\", t1, t2)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\r\n parser.add_argument(\r\n \"--use_gpu\",\r\n type=\"bool\",\r\n nargs=\"?\",\r\n const=True,\r\n default=True,\r\n help=\"Run GPU benchmarks.\"\r\n )\r\n global FLAGS # pylint:disable=global-at-module-level\r\n FLAGS, unparsed = parser.parse_known_args()\r\n test.main(argv=[sys.argv[0]] + unparsed)\r\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"AdaMax for TensorFlow.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import resource_variable_ops\r\nfrom tensorflow.python.ops import state_ops\r\nfrom tensorflow.python.training import adam\r\nfrom tensorflow.python.training import training_ops\r\n\r\n\r\nclass AdaMaxOptimizer(adam.AdamOptimizer):\r\n \"\"\"Optimizer that implements the AdaMax algorithm.\r\n\r\n Adamax is sometimes superior to adam, specially in models with embeddings,\r\n see [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)\r\n ([pdf](http://arxiv.org/pdf/1412.6980.pdf)).\r\n \"\"\"\r\n\r\n def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,\r\n use_locking=False, name=\"AdaMax\"):\r\n \"\"\"Construct a new AdaMax optimizer.\r\n\r\n Initialization:\r\n\r\n ```\r\n m_0 <- 0 (Initialize initial 1st moment vector)\r\n v_0 <- 0 (Initialize the exponentially weighted infinity norm)\r\n t <- 0 (Initialize timestep)\r\n ```\r\n\r\n The update rule for `variable` with gradient `g` uses an optimization\r\n described at the end of section 7.1 of the paper:\r\n\r\n ```\r\n t <- t + 1\r\n\r\n m_t <- beta1 * m_{t-1} + (1 - beta1) * g\r\n v_t <- max(beta2 * v_{t-1}, abs(g))\r\n variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)\r\n ```\r\n\r\n Similar to AdamOptimizer, the epsilon is added for numerical stability\r\n (especially to get rid of division by zero when v_t = 0).\r\n\r\n Contrast to AdamOptimizer, the sparse implementation of this algorithm\r\n (used when the gradient is an IndexedSlices object, typically because of\r\n `tf.gather` or an embedding lookup in the forward pass) only updates\r\n variable slices and corresponding `m_t`, `v_t` terms when that part of\r\n the variable was used in the forward pass. This means that the sparse\r\n behavior is contrast to the dense behavior (similar to some momentum\r\n implementations which ignore momentum unless a variable slice was actually\r\n used).\r\n\r\n Args:\r\n learning_rate: A Tensor or a floating point value. 
The learning rate.\r\n beta1: A float value or a constant float tensor.\r\n The exponential decay rate for the 1st moment estimates.\r\n beta2: A float value or a constant float tensor.\r\n The exponential decay rate for the exponentially weighted infinity norm.\r\n epsilon: A small constant for numerical stability.\r\n use_locking: If True use locks for update operations.\r\n name: Optional name for the operations created when applying gradients.\r\n Defaults to \"AdaMax\".\r\n \"\"\"\r\n super(AdaMaxOptimizer, self).__init__(learning_rate, beta1, beta2,\r\n epsilon, use_locking, name)\r\n\r\n def _get_beta_accumulators(self):\r\n if context.executing_eagerly():\r\n graph = None\r\n else:\r\n graph = ops.get_default_graph()\r\n return self._get_non_slot_variable(\"beta1_power\", graph=graph)\r\n\r\n def _create_slots(self, var_list):\r\n # Create the beta1 accumulators on the same device as the first\r\n # variable. Sort the var_list to make sure this device is consistent across\r\n # workers (these need to go on the same PS, otherwise some updates are\r\n # silently ignored).\r\n first_var = min(var_list, key=lambda x: x.name)\r\n self._create_non_slot_variable(initial_value=self._beta1,\r\n name=\"beta1_power\",\r\n colocate_with=first_var)\r\n\r\n # Create slots for the first and second moments.\r\n for v in var_list:\r\n self._zeros_slot(v, \"m\", self._name)\r\n self._zeros_slot(v, \"v\", self._name)\r\n\r\n def _apply_dense(self, grad, var):\r\n m = self.get_slot(var, \"m\")\r\n v = self.get_slot(var, \"v\")\r\n beta1_power = self._get_beta_accumulators()\r\n return training_ops.apply_ada_max(\r\n var, m, v,\r\n math_ops.cast(beta1_power, var.dtype.base_dtype),\r\n math_ops.cast(self._lr_t, var.dtype.base_dtype),\r\n math_ops.cast(self._beta1_t, var.dtype.base_dtype),\r\n math_ops.cast(self._beta2_t, var.dtype.base_dtype),\r\n math_ops.cast(self._epsilon_t, var.dtype.base_dtype),\r\n grad, use_locking=self._use_locking).op\r\n\r\n def _resource_apply_dense(self, grad, var):\r\n m = self.get_slot(var, \"m\")\r\n v = self.get_slot(var, \"v\")\r\n beta1_power = self._get_beta_accumulators()\r\n return training_ops.resource_apply_ada_max(\r\n var.handle, m.handle, v.handle,\r\n math_ops.cast(beta1_power, grad.dtype.base_dtype),\r\n math_ops.cast(self._lr_t, grad.dtype.base_dtype),\r\n math_ops.cast(self._beta1_t, grad.dtype.base_dtype),\r\n math_ops.cast(self._beta2_t, grad.dtype.base_dtype),\r\n math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),\r\n grad, use_locking=self._use_locking)\r\n\r\n def _apply_sparse_shared(self, grad, var, indices,\r\n scatter_add, scatter_update):\r\n beta1_power = self._get_beta_accumulators()\r\n beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)\r\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\r\n beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)\r\n beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)\r\n epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)\r\n # m_t = beta1 * m + (1 - beta1) * g_t\r\n m = self.get_slot(var, \"m\")\r\n m_slice = array_ops.gather(m, indices)\r\n m_t_slice = m_slice * beta1_t + grad * (1 - beta1_t)\r\n with ops.control_dependencies([m_t_slice]):\r\n m_t = scatter_update(m, indices, m_t_slice)\r\n # u_t = max(beta2 * u, abs(g_t))\r\n v = self.get_slot(var, \"v\")\r\n v_slice = array_ops.gather(v, indices)\r\n v_t_slice = math_ops.maximum(v_slice * beta2_t, math_ops.abs(grad))\r\n with ops.control_dependencies([v_t_slice]):\r\n v_t = scatter_update(v, indices, 
v_t_slice)\r\n # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t\r\n var_slice = -lr_t / (1 - beta1_power) * (m_t_slice /\r\n (v_t_slice + epsilon_t))\r\n with ops.control_dependencies([var_slice]):\r\n var_update = scatter_add(var, indices, var_slice)\r\n return control_flow_ops.group(*[var_update, m_t, v_t])\r\n\r\n def _apply_sparse(self, grad, var):\r\n return self._apply_sparse_shared(\r\n grad.values, var, grad.indices,\r\n lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda\r\n x, i, v, use_locking=self._use_locking),\r\n lambda x, i, v: state_ops.scatter_update( # pylint: disable=g-long-lambda\r\n x, i, v, use_locking=self._use_locking))\r\n\r\n def _resource_scatter_update(self, x, i, v):\r\n with ops.control_dependencies(\r\n [resource_variable_ops.resource_scatter_update(\r\n x.handle, i, v)]):\r\n return x.value()\r\n\r\n def _resource_apply_sparse(self, grad, var, indices):\r\n return self._apply_sparse_shared(\r\n grad, var, indices,\r\n self._resource_scatter_add, self._resource_scatter_update)\r\n\r\n def _finish(self, update_ops, name_scope):\r\n # Update the power accumulators.\r\n with ops.control_dependencies(update_ops):\r\n beta1_power = self._get_beta_accumulators()\r\n with ops.colocate_with(beta1_power):\r\n update_beta1 = beta1_power.assign(\r\n beta1_power * self._beta1_t, use_locking=self._use_locking)\r\n return control_flow_ops.group(*update_ops + [update_beta1],\r\n name=name_scope)\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Test for checking stats accumulator related ops.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.platform import googletest\r\n\r\n\r\nclass StatsAccumulatorScalarTest(test_util.TensorFlowTestCase):\r\n \"\"\"Tests for scalar gradients and hessians accumulator.\"\"\"\r\n\r\n def testSimpleAcculumator(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([]),\r\n hessian_shape=tensor_shape.TensorShape([]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2],\r\n feature_ids=[[2, 0], [3, 0]],\r\n gradients=[0.1, 0.3],\r\n hessians=[0.2, 0.4])\r\n op2 = accumulator.add(0, [1], [[2, 0]], [0.1], [0.2])\r\n\r\n with ops.control_dependencies([op1, op2]):\r\n num_updates, partition, bucket_ids, grads, hessians = accumulator.flush(\r\n stamp_token=0, next_stamp_token=1)\r\n num_updates, partition, bucket_ids, 
grads, hessians = sess.run(\r\n [num_updates, partition, bucket_ids, grads, hessians])\r\n\r\n result = _AccumulatorResultToDict(partition, bucket_ids, grads, hessians)\r\n self.assertEqual(num_updates, 2)\r\n self.assertEqual(len(result), 2)\r\n # Key is partition, bucket, dimension\r\n self.assertAllClose(result[(1, 2, 0)], [0.2, 0.4])\r\n self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])\r\n\r\n def testMultidimensionalAcculumator(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([]),\r\n hessian_shape=tensor_shape.TensorShape([]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2, 1],\r\n feature_ids=[[2, 2], [3, 0], [2, 2]],\r\n gradients=[0.1, 0.3, 0.8],\r\n hessians=[0.2, 0.4, -9])\r\n op2 = accumulator.add(0, [2, 1], [[3, 1], [2, 2]], [0.1, 1], [0.2, -1])\r\n\r\n with ops.control_dependencies([op1, op2]):\r\n num_updates, partition, bucket_ids, grads, hessians = accumulator.flush(\r\n stamp_token=0, next_stamp_token=1)\r\n num_updates, partition, bucket_ids, grads, hessians = sess.run(\r\n [num_updates, partition, bucket_ids, grads, hessians])\r\n\r\n result = _AccumulatorResultToDict(partition, bucket_ids, grads, hessians)\r\n self.assertEqual(num_updates, 2)\r\n self.assertEqual(len(result), 3)\r\n # Key is partition, bucket, dimension.\r\n self.assertAllClose(result[(1, 2, 2)], [1.9, -9.8])\r\n self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])\r\n self.assertAllClose(result[(2, 3, 1)], [0.1, 0.2])\r\n\r\n def testDropStaleUpdate(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([]),\r\n hessian_shape=tensor_shape.TensorShape([]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2],\r\n feature_ids=[[2, 0], [3, 0]],\r\n gradients=[0.1, 0.3],\r\n hessians=[0.2, 0.4])\r\n op2 = accumulator.add(\r\n stamp_token=-1,\r\n partition_ids=[1],\r\n feature_ids=[[2, 0]],\r\n gradients=[0.1],\r\n hessians=[0.2])\r\n\r\n with ops.control_dependencies([op1, op2]):\r\n num_updates, partition, feature, grads, hessians = accumulator.flush(\r\n stamp_token=0, next_stamp_token=1)\r\n num_updates, partition, feature, grads, hessians = sess.run(\r\n [num_updates, partition, feature, grads, hessians])\r\n\r\n result = _AccumulatorResultToDict(partition, feature, grads, hessians)\r\n self.assertEqual(num_updates, 1)\r\n self.assertEqual(len(result), 2)\r\n self.assertAllClose(result[(1, 2, 0)], [0.1, 0.2])\r\n self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])\r\n\r\n def testSerialize(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([]),\r\n hessian_shape=tensor_shape.TensorShape([]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2],\r\n feature_ids=[[2, 0], [3, 0]],\r\n gradients=[0.1, 0.3],\r\n hessians=[0.2, 0.4])\r\n\r\n with ops.control_dependencies([op1]):\r\n (stamp_token, num_updates, partition_1, feature_1, grads_1,\r\n hessians_1) = accumulator.saveable.serialize()\r\n # Make sure that the accumulator hasn't changed during serialization.\r\n with ops.control_dependencies([stamp_token]):\r\n 
num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (\r\n accumulator.flush(stamp_token=0, next_stamp_token=1))\r\n (stamp_token, num_updates, partition_1, feature_1, grads_1, hessians_1,\r\n num_updates_2, partition_2, feature_2, grads_2, hessians_2) = sess.run(\r\n [\r\n stamp_token, num_updates, partition_1, feature_1, grads_1,\r\n hessians_1, num_updates_2, partition_2, feature_2, grads_2,\r\n hessians_2\r\n ])\r\n\r\n result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,\r\n hessians_1)\r\n result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,\r\n hessians_2)\r\n self.assertEqual(num_updates, 1)\r\n self.assertEqual(num_updates_2, 1)\r\n self.assertEqual(len(result_1), 2)\r\n self.assertAllClose(result_1[(1, 2, 0)], [0.1, 0.2])\r\n self.assertAllClose(result_1[(2, 3, 0)], [0.3, 0.4])\r\n self.assertAllEqual(result_1, result_2)\r\n self.assertEqual(0, stamp_token)\r\n\r\n def testDeserialize(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([]),\r\n hessian_shape=tensor_shape.TensorShape([]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n # These will be deleted due to deserialize call.\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2],\r\n feature_ids=[[2, 0], [3, 1]],\r\n gradients=[0.1, 0.3],\r\n hessians=[0.2, 0.4])\r\n\r\n with ops.control_dependencies([op1]):\r\n deserialize = (\r\n accumulator.saveable.deserialize(\r\n stamp_token=2,\r\n num_updates=3,\r\n partition_ids=[3, 4],\r\n feature_ids=[[5, 0], [6, 2]],\r\n gradients=[0.4, 0.5],\r\n hessians=[0.6, 0.7]))\r\n with ops.control_dependencies([deserialize]):\r\n num_updates, partition, feature, grads, hessians = accumulator.flush(\r\n stamp_token=2, next_stamp_token=3)\r\n num_updates, partition, feature, grads, hessians = sess.run(\r\n [num_updates, partition, feature, grads, hessians])\r\n\r\n result = _AccumulatorResultToDict(partition, feature, grads,\r\n hessians)\r\n self.assertEqual(num_updates, 3)\r\n self.assertEqual(len(result), 2)\r\n self.assertAllClose(result[(3, 5, 0)], [0.4, 0.6])\r\n self.assertAllClose(result[(4, 6, 2)], [0.5, 0.7])\r\n\r\n def testMakeSummary(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([]),\r\n hessian_shape=tensor_shape.TensorShape([]))\r\n partition, feature, grads, hessians = accumulator._make_summary(\r\n partition_ids=[1, 2, 1],\r\n feature_ids=[[2, 0], [3, 1], [2, 0]],\r\n gradients=[0.1, 0.3, 0.1],\r\n hessians=[0.2, 0.4, 0.2])\r\n partition, feature, grads, hessians = sess.run(\r\n [partition, feature, grads, hessians])\r\n result = _AccumulatorResultToDict(partition, feature, grads, hessians)\r\n self.assertEqual(len(result), 2)\r\n self.assertAllClose(result[(1, 2, 0)], [0.2, 0.4])\r\n self.assertAllClose(result[(2, 3, 1)], [0.3, 0.4])\r\n\r\n\r\nclass StatsAccumulatorTensorTest(test_util.TensorFlowTestCase):\r\n \"\"\"Tests for tensor gradients and hessians accumulator.\"\"\"\r\n\r\n def testSimpleAcculumator(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([2]),\r\n hessian_shape=tensor_shape.TensorShape([2, 2]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2],\r\n 
feature_ids=[[2, 0], [3, 0]],\r\n # Two values for gradients,\r\n gradients=[[0.1, 0.1], [0.2, 0.2]],\r\n # A 2x2 matrix for each hessian.\r\n hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,\r\n 0.08]]])\r\n op2 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1],\r\n feature_ids=[[2, 0]],\r\n gradients=[[0.10, 0.11]],\r\n hessians=[[[0.011, 0.022], [0.033, 0.044]]])\r\n\r\n with ops.control_dependencies([op1, op2]):\r\n num_updates, partition, feature, grads, hessians = accumulator.flush(\r\n stamp_token=0, next_stamp_token=1)\r\n num_updates, partition, feature, grads, hessians = sess.run(\r\n [num_updates, partition, feature, grads, hessians])\r\n\r\n result = _AccumulatorResultToDict(partition, feature, grads, hessians)\r\n self.assertEqual(num_updates, 2)\r\n self.assertEqual(len(result), 2)\r\n self.assertAllClose(result[(1, 2, 0)][0], [0.20, 0.21])\r\n self.assertAllClose(result[(1, 2, 0)][1],\r\n [[0.021, 0.042], [0.063, 0.084]])\r\n self.assertAllClose(result[(2, 3, 0)][0], [0.2, 0.2])\r\n self.assertAllClose(result[(2, 3, 0)][1], [[0.05, 0.06], [0.07, 0.08]])\r\n\r\n def testMultidimensionalAcculumator(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([2]),\r\n hessian_shape=tensor_shape.TensorShape([2, 2]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2],\r\n feature_ids=[[2, 4], [3, 1]],\r\n # Two values for gradients,\r\n gradients=[[0.1, 0.1], [0.2, 0.2]],\r\n # A 2x2 matrix for each hessian.\r\n hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,\r\n 0.08]]])\r\n op2 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1],\r\n feature_ids=[[2, 4]],\r\n gradients=[[0.10, 0.11]],\r\n hessians=[[[0.011, 0.022], [0.033, 0.044]]])\r\n\r\n with ops.control_dependencies([op1, op2]):\r\n num_updates, partition, feature, grads, hessians = accumulator.flush(\r\n stamp_token=0, next_stamp_token=1)\r\n num_updates, partition, feature, grads, hessians = sess.run(\r\n [num_updates, partition, feature, grads, hessians])\r\n\r\n result = _AccumulatorResultToDict(partition, feature, grads, hessians)\r\n self.assertEqual(num_updates, 2)\r\n self.assertEqual(len(result), 2)\r\n self.assertAllClose(result[(1, 2, 4)][0], [0.20, 0.21])\r\n self.assertAllClose(result[(1, 2, 4)][1],\r\n [[0.021, 0.042], [0.063, 0.084]])\r\n self.assertAllClose(result[(2, 3, 1)][0], [0.2, 0.2])\r\n self.assertAllClose(result[(2, 3, 1)][1], [[0.05, 0.06], [0.07, 0.08]])\r\n\r\n def testDropStaleUpdate(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([2]),\r\n hessian_shape=tensor_shape.TensorShape([2, 2]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2],\r\n feature_ids=[[2, 5], [3, 0]],\r\n # Two values for gradients,\r\n gradients=[[0.1, 0.1], [0.2, 0.2]],\r\n # A 2x2 matrix for each hessian.\r\n hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,\r\n 0.08]]])\r\n op2 = accumulator.add(\r\n stamp_token=-1,\r\n partition_ids=[1],\r\n feature_ids=[[2, 5]],\r\n gradients=[[0.10, 0.11]],\r\n hessians=[[[0.011, 0.022], [0.033, 0.044]]])\r\n\r\n with ops.control_dependencies([op1, op2]):\r\n num_updates, partition, feature, grads, hessians = accumulator.flush(\r\n 
stamp_token=0, next_stamp_token=1)\r\n num_updates, partition, feature, grads, hessians = sess.run(\r\n [num_updates, partition, feature, grads, hessians])\r\n\r\n result = _AccumulatorResultToDict(partition, feature, grads, hessians)\r\n self.assertEqual(num_updates, 1)\r\n self.assertEqual(len(result), 2)\r\n self.assertAllClose(result[(1, 2, 5)][0], [0.1, 0.1])\r\n self.assertAllClose(result[(1, 2, 5)][1], [[0.01, 0.02], [0.03, 0.04]])\r\n self.assertAllClose(result[(2, 3, 0)][0], [0.2, 0.2])\r\n self.assertAllClose(result[(2, 3, 0)][1], [[0.05, 0.06], [0.07, 0.08]])\r\n\r\n def testSerialize(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([2]),\r\n hessian_shape=tensor_shape.TensorShape([2, 2]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2],\r\n feature_ids=[[2, 0], [3, 0]],\r\n # Two values for gradients,\r\n gradients=[[0.1, 0.1], [0.2, 0.2]],\r\n # A 2x2 matrix for each hessian.\r\n hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,\r\n 0.08]]])\r\n\r\n with ops.control_dependencies([op1]):\r\n (stamp_token, num_updates_1, partition_1, feature_1, grads_1,\r\n hessians_1) = accumulator.saveable.serialize()\r\n # Make sure that the accumulator hasn't changed during serialization.\r\n with ops.control_dependencies([stamp_token]):\r\n num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (\r\n accumulator.flush(stamp_token=0, next_stamp_token=1))\r\n (stamp_token, num_updates_1, partition_1, feature_1, grads_1,\r\n hessians_1, num_updates_2, partition_2, feature_2, grads_2,\r\n hessians_2) = sess.run([\r\n stamp_token, num_updates_1, partition_1, feature_1, grads_1,\r\n hessians_1, num_updates_2, partition_2, feature_2, grads_2,\r\n hessians_2\r\n ])\r\n\r\n result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,\r\n hessians_1)\r\n result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,\r\n hessians_2)\r\n\r\n self.assertEqual(num_updates_1, 1)\r\n self.assertEqual(num_updates_2, 1)\r\n self.assertEqual(len(result_1), 2)\r\n self.assertAllClose(result_1[(1, 2, 0)][0], [0.1, 0.1])\r\n self.assertAllClose(result_1[(1, 2, 0)][1], [[0.01, 0.02], [0.03, 0.04]])\r\n self.assertAllClose(result_1[(2, 3, 0)][0], [0.2, 0.2])\r\n self.assertAllClose(result_1[(2, 3, 0)][1], [[0.05, 0.06], [0.07, 0.08]])\r\n\r\n self.assertAllEqual(result_1[1, 2, 0][0], result_2[1, 2, 0][0])\r\n self.assertAllEqual(result_1[1, 2, 0][1], result_2[1, 2, 0][1])\r\n self.assertAllEqual(result_1[2, 3, 0][0], result_2[2, 3, 0][0])\r\n self.assertAllEqual(result_1[2, 3, 0][1], result_2[2, 3, 0][1])\r\n\r\n def testDeserialize(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([2]),\r\n hessian_shape=tensor_shape.TensorShape([2, 2]))\r\n with ops.control_dependencies([accumulator.initializer]):\r\n # These will be deleted due to deserialize call.\r\n op1 = accumulator.add(\r\n stamp_token=0,\r\n partition_ids=[1, 2],\r\n feature_ids=[[2, 0], [3, 0]],\r\n # Two values for gradients,\r\n gradients=[[0.1, 0.1], [0.2, 0.2]],\r\n # A 2x2 matrix for each hessian.\r\n hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,\r\n 0.08]]])\r\n\r\n with ops.control_dependencies([op1]):\r\n deserialize = accumulator.saveable.deserialize(\r\n stamp_token=2,\r\n 
num_updates=3,\r\n partition_ids=[3, 4],\r\n feature_ids=[[4, 0], [5, 0]],\r\n # Two values for gradients,\r\n gradients=[[0.3, 0.3], [0.5, 0.5]],\r\n # A 2x2 matrix for each hessian.\r\n hessians=[[[0.03, 0.04], [0.05, 0.06]], [[0.07, 0.08], [0.09,\r\n 0.10]]])\r\n with ops.control_dependencies([deserialize]):\r\n num_updates, partition, feature, grads, hessians = accumulator.flush(\r\n stamp_token=2, next_stamp_token=3)\r\n num_updates, partition, feature, grads, hessians = sess.run(\r\n [num_updates, partition, feature, grads, hessians])\r\n\r\n result = _AccumulatorResultToDict(partition, feature, grads,\r\n hessians)\r\n self.assertEqual(num_updates, 3)\r\n self.assertEqual(len(result), 2)\r\n self.assertAllClose(result[(3, 4, 0)][0], [0.3, 0.3])\r\n self.assertAllClose(result[(3, 4, 0)][1], [[0.03, 0.04], [0.05, 0.06]])\r\n self.assertAllClose(result[(4, 5, 0)][0], [0.5, 0.5])\r\n self.assertAllClose(result[(4, 5, 0)][1], [[0.07, 0.08], [0.09, 0.10]])\r\n\r\n def testMakeSummary(self):\r\n with self.cached_session() as sess:\r\n accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.TensorShape([2]),\r\n hessian_shape=tensor_shape.TensorShape([2, 2]))\r\n partition, feature, grads, hessians = accumulator._make_summary(\r\n partition_ids=[1, 2, 1],\r\n feature_ids=[[2, 0], [3, 2], [2, 0]],\r\n # Two values for gradients,\r\n gradients=[[0.1, 0.1], [0.2, 0.2], [0.10, 0.11]],\r\n # A 2x2 matrix for each hessian.\r\n hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07, 0.08]],\r\n [[0.011, 0.022], [0.033, 0.044]]])\r\n partition, feature, grads, hessians = sess.run(\r\n [partition, feature, grads, hessians])\r\n\r\n result = _AccumulatorResultToDict(partition, feature, grads, hessians)\r\n self.assertEqual(len(result), 2)\r\n self.assertAllClose(result[(1, 2, 0)][0], [0.20, 0.21])\r\n self.assertAllClose(result[(1, 2, 0)][1],\r\n [[0.021, 0.042], [0.063, 0.084]])\r\n self.assertAllClose(result[(2, 3, 2)][0], [0.2, 0.2])\r\n self.assertAllClose(result[(2, 3, 2)][1], [[0.05, 0.06], [0.07, 0.08]])\r\n\r\n\r\ndef _AccumulatorResultToDict(partition, feature, grads, hessians):\r\n \"\"\"Converts the inputs to a dictionary since the ordering changes.\"\"\"\r\n return {(partition[i], feature[i, 0], feature[i, 1]): (grads[i], hessians[i])\r\n for i in range(len(partition))}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n googletest.main()\r\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for initializers.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport importlib\r\nimport math\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.python.eager import backprop\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gradients_impl\r\nfrom tensorflow.python.ops import nn_ops\r\nfrom tensorflow.python.ops import variables\r\nfrom tensorflow.python.ops.distributions import kullback_leibler\r\nfrom tensorflow.python.ops.distributions import normal as normal_lib\r\nfrom tensorflow.python.platform import test\r\nfrom tensorflow.python.platform import tf_logging\r\n\r\n\r\ndef try_import(name): # pylint: disable=invalid-name\r\n module = None\r\n try:\r\n module = importlib.import_module(name)\r\n except ImportError as e:\r\n tf_logging.warning(\"Could not import %s: %s\" % (name, str(e)))\r\n return module\r\n\r\nstats = try_import(\"scipy.stats\")\r\n\r\n\r\nclass NormalTest(test.TestCase):\r\n\r\n def setUp(self):\r\n self._rng = np.random.RandomState(123)\r\n\r\n def assertAllFinite(self, tensor):\r\n is_finite = np.isfinite(self.evaluate(tensor))\r\n all_true = np.ones_like(is_finite, dtype=np.bool)\r\n self.assertAllEqual(all_true, is_finite)\r\n\r\n def _testParamShapes(self, sample_shape, expected):\r\n param_shapes = normal_lib.Normal.param_shapes(sample_shape)\r\n mu_shape, sigma_shape = param_shapes[\"loc\"], param_shapes[\"scale\"]\r\n self.assertAllEqual(expected, self.evaluate(mu_shape))\r\n self.assertAllEqual(expected, self.evaluate(sigma_shape))\r\n mu = array_ops.zeros(mu_shape)\r\n sigma = array_ops.ones(sigma_shape)\r\n self.assertAllEqual(\r\n expected,\r\n self.evaluate(array_ops.shape(normal_lib.Normal(mu, sigma).sample())))\r\n\r\n def _testParamStaticShapes(self, sample_shape, expected):\r\n param_shapes = normal_lib.Normal.param_static_shapes(sample_shape)\r\n mu_shape, sigma_shape = param_shapes[\"loc\"], param_shapes[\"scale\"]\r\n self.assertEqual(expected, mu_shape)\r\n self.assertEqual(expected, sigma_shape)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testSampleLikeArgsGetDistDType(self):\r\n dist = normal_lib.Normal(0., 1.)\r\n self.assertEqual(dtypes.float32, dist.dtype)\r\n for method in (\"log_prob\", \"prob\", \"log_cdf\", \"cdf\",\r\n \"log_survival_function\", \"survival_function\", \"quantile\"):\r\n self.assertEqual(dtypes.float32, getattr(dist, method)(1).dtype)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testParamShapes(self):\r\n sample_shape = [10, 3, 
4]\r\n self._testParamShapes(sample_shape, sample_shape)\r\n self._testParamShapes(constant_op.constant(sample_shape), sample_shape)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testParamStaticShapes(self):\r\n sample_shape = [10, 3, 4]\r\n self._testParamStaticShapes(sample_shape, sample_shape)\r\n self._testParamStaticShapes(\r\n tensor_shape.TensorShape(sample_shape), sample_shape)\r\n\r\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\r\n def testNormalWithSoftplusScale(self):\r\n mu = array_ops.zeros((10, 3))\r\n rho = array_ops.ones((10, 3)) * -2.\r\n normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)\r\n self.assertAllEqual(self.evaluate(mu), self.evaluate(normal.loc))\r\n self.assertAllEqual(\r\n self.evaluate(nn_ops.softplus(rho)), self.evaluate(normal.scale))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalLogPDF(self):\r\n batch_size = 6\r\n mu = constant_op.constant([3.0] * batch_size)\r\n sigma = constant_op.constant([math.sqrt(10.0)] * batch_size)\r\n x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n log_pdf = normal.log_prob(x)\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(log_pdf).shape)\r\n self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)\r\n\r\n pdf = normal.prob(x)\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(pdf).shape)\r\n self.assertAllEqual(normal.batch_shape, pdf.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(pdf).shape)\r\n\r\n if not stats:\r\n return\r\n expected_log_pdf = stats.norm(self.evaluate(mu),\r\n self.evaluate(sigma)).logpdf(x)\r\n self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))\r\n self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalLogPDFMultidimensional(self):\r\n batch_size = 6\r\n mu = constant_op.constant([[3.0, -3.0]] * batch_size)\r\n sigma = constant_op.constant(\r\n [[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)\r\n x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n log_pdf = normal.log_prob(x)\r\n log_pdf_values = self.evaluate(log_pdf)\r\n self.assertEqual(log_pdf.get_shape(), (6, 2))\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(log_pdf).shape)\r\n self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)\r\n\r\n pdf = normal.prob(x)\r\n pdf_values = self.evaluate(pdf)\r\n self.assertEqual(pdf.get_shape(), (6, 2))\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), pdf_values.shape)\r\n self.assertAllEqual(normal.batch_shape, pdf.get_shape())\r\n self.assertAllEqual(normal.batch_shape, pdf_values.shape)\r\n\r\n if not stats:\r\n return\r\n expected_log_pdf = stats.norm(self.evaluate(mu),\r\n self.evaluate(sigma)).logpdf(x)\r\n 
self.assertAllClose(expected_log_pdf, log_pdf_values)\r\n self.assertAllClose(np.exp(expected_log_pdf), pdf_values)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalCDF(self):\r\n batch_size = 50\r\n mu = self._rng.randn(batch_size)\r\n sigma = self._rng.rand(batch_size) + 1.0\r\n x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)\r\n\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n cdf = normal.cdf(x)\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(cdf).shape)\r\n self.assertAllEqual(normal.batch_shape, cdf.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)\r\n if not stats:\r\n return\r\n expected_cdf = stats.norm(mu, sigma).cdf(x)\r\n self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalSurvivalFunction(self):\r\n batch_size = 50\r\n mu = self._rng.randn(batch_size)\r\n sigma = self._rng.rand(batch_size) + 1.0\r\n x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)\r\n\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n sf = normal.survival_function(x)\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), sf.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(sf).shape)\r\n self.assertAllEqual(normal.batch_shape, sf.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)\r\n if not stats:\r\n return\r\n expected_sf = stats.norm(mu, sigma).sf(x)\r\n self.assertAllClose(expected_sf, self.evaluate(sf), atol=0)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalLogCDF(self):\r\n batch_size = 50\r\n mu = self._rng.randn(batch_size)\r\n sigma = self._rng.rand(batch_size) + 1.0\r\n x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)\r\n\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n cdf = normal.log_cdf(x)\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(cdf).shape)\r\n self.assertAllEqual(normal.batch_shape, cdf.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)\r\n\r\n if not stats:\r\n return\r\n expected_cdf = stats.norm(mu, sigma).logcdf(x)\r\n self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0, rtol=1e-3)\r\n\r\n def testFiniteGradientAtDifficultPoints(self):\r\n for dtype in [np.float32, np.float64]:\r\n g = ops.Graph()\r\n with g.as_default():\r\n mu = variables.Variable(dtype(0.0))\r\n sigma = variables.Variable(dtype(1.0))\r\n dist = normal_lib.Normal(loc=mu, scale=sigma)\r\n x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)\r\n for func in [\r\n dist.cdf, dist.log_cdf, dist.survival_function,\r\n dist.log_survival_function, dist.log_prob, dist.prob\r\n ]:\r\n value = func(x)\r\n grads = gradients_impl.gradients(value, [mu, sigma])\r\n with self.session(graph=g):\r\n variables.global_variables_initializer().run()\r\n self.assertAllFinite(value)\r\n self.assertAllFinite(grads[0])\r\n self.assertAllFinite(grads[1])\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalLogSurvivalFunction(self):\r\n batch_size = 50\r\n mu = self._rng.randn(batch_size)\r\n sigma = self._rng.rand(batch_size) + 1.0\r\n x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)\r\n\r\n normal = 
normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n sf = normal.log_survival_function(x)\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), sf.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(sf).shape)\r\n self.assertAllEqual(normal.batch_shape, sf.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)\r\n\r\n if not stats:\r\n return\r\n expected_sf = stats.norm(mu, sigma).logsf(x)\r\n self.assertAllClose(expected_sf, self.evaluate(sf), atol=0, rtol=1e-5)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalEntropyWithScalarInputs(self):\r\n # Scipy.stats.norm cannot deal with the shapes in the other test.\r\n mu_v = 2.34\r\n sigma_v = 4.56\r\n normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)\r\n\r\n entropy = normal.entropy()\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(entropy).shape)\r\n self.assertAllEqual(normal.batch_shape, entropy.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)\r\n # scipy.stats.norm cannot deal with these shapes.\r\n if not stats:\r\n return\r\n expected_entropy = stats.norm(mu_v, sigma_v).entropy()\r\n self.assertAllClose(expected_entropy, self.evaluate(entropy))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalEntropy(self):\r\n mu_v = np.array([1.0, 1.0, 1.0])\r\n sigma_v = np.array([[1.0, 2.0, 3.0]]).T\r\n normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)\r\n\r\n # scipy.stats.norm cannot deal with these shapes.\r\n sigma_broadcast = mu_v * sigma_v\r\n expected_entropy = 0.5 * np.log(2 * np.pi * np.exp(1) * sigma_broadcast**2)\r\n entropy = normal.entropy()\r\n np.testing.assert_allclose(expected_entropy, self.evaluate(entropy))\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(entropy).shape)\r\n self.assertAllEqual(normal.batch_shape, entropy.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)\r\n\r\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\r\n def testNormalMeanAndMode(self):\r\n # Mu will be broadcast to [7, 7, 7].\r\n mu = [7.]\r\n sigma = [11., 12., 13.]\r\n\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n self.assertAllEqual((3,), normal.mean().get_shape())\r\n self.assertAllEqual([7., 7, 7], self.evaluate(normal.mean()))\r\n\r\n self.assertAllEqual((3,), normal.mode().get_shape())\r\n self.assertAllEqual([7., 7, 7], self.evaluate(normal.mode()))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalQuantile(self):\r\n batch_size = 52\r\n mu = self._rng.randn(batch_size)\r\n sigma = self._rng.rand(batch_size) + 1.0\r\n p = np.linspace(0., 1.0, batch_size - 2).astype(np.float64)\r\n # Quantile performs piecewise rational approximation so adding some\r\n # special input values to make sure we hit all the pieces.\r\n p = np.hstack((p, np.exp(-33), 1. 
- np.exp(-33)))\r\n\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n x = normal.quantile(p)\r\n\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()), x.get_shape())\r\n self.assertAllEqual(\r\n self.evaluate(normal.batch_shape_tensor()),\r\n self.evaluate(x).shape)\r\n self.assertAllEqual(normal.batch_shape, x.get_shape())\r\n self.assertAllEqual(normal.batch_shape, self.evaluate(x).shape)\r\n\r\n if not stats:\r\n return\r\n expected_x = stats.norm(mu, sigma).ppf(p)\r\n self.assertAllClose(expected_x, self.evaluate(x), atol=0.)\r\n\r\n def _baseQuantileFiniteGradientAtDifficultPoints(self, dtype):\r\n g = ops.Graph()\r\n with g.as_default():\r\n mu = variables.Variable(dtype(0.0))\r\n sigma = variables.Variable(dtype(1.0))\r\n dist = normal_lib.Normal(loc=mu, scale=sigma)\r\n p = variables.Variable(\r\n np.array([0.,\r\n np.exp(-32.), np.exp(-2.),\r\n 1. - np.exp(-2.), 1. - np.exp(-32.),\r\n 1.]).astype(dtype))\r\n\r\n value = dist.quantile(p)\r\n grads = gradients_impl.gradients(value, [mu, p])\r\n with self.cached_session(graph=g):\r\n variables.global_variables_initializer().run()\r\n self.assertAllFinite(grads[0])\r\n self.assertAllFinite(grads[1])\r\n\r\n def testQuantileFiniteGradientAtDifficultPointsFloat32(self):\r\n self._baseQuantileFiniteGradientAtDifficultPoints(np.float32)\r\n\r\n def testQuantileFiniteGradientAtDifficultPointsFloat64(self):\r\n self._baseQuantileFiniteGradientAtDifficultPoints(np.float64)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalVariance(self):\r\n # sigma will be broadcast to [7, 7, 7]\r\n mu = [1., 2., 3.]\r\n sigma = [7.]\r\n\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n self.assertAllEqual((3,), normal.variance().get_shape())\r\n self.assertAllEqual([49., 49, 49], self.evaluate(normal.variance()))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalStandardDeviation(self):\r\n # sigma will be broadcast to [7, 7, 7]\r\n mu = [1., 2., 3.]\r\n sigma = [7.]\r\n\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n self.assertAllEqual((3,), normal.stddev().get_shape())\r\n self.assertAllEqual([7., 7, 7], self.evaluate(normal.stddev()))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalSample(self):\r\n mu = constant_op.constant(3.0)\r\n sigma = constant_op.constant(math.sqrt(3.0))\r\n mu_v = 3.0\r\n sigma_v = np.sqrt(3.0)\r\n n = constant_op.constant(100000)\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n samples = normal.sample(n)\r\n sample_values = self.evaluate(samples)\r\n # Note that the standard error for the sample mean is ~ sigma / sqrt(n).\r\n # The sample variance similarly is dependent on sigma and n.\r\n # Thus, the tolerances below are very sensitive to number of samples\r\n # as well as the variances chosen.\r\n self.assertEqual(sample_values.shape, (100000,))\r\n self.assertAllClose(sample_values.mean(), mu_v, atol=1e-1)\r\n self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)\r\n\r\n expected_samples_shape = tensor_shape.TensorShape(\r\n [self.evaluate(n)]).concatenate(\r\n tensor_shape.TensorShape(\r\n self.evaluate(normal.batch_shape_tensor())))\r\n\r\n self.assertAllEqual(expected_samples_shape, samples.get_shape())\r\n self.assertAllEqual(expected_samples_shape, sample_values.shape)\r\n\r\n expected_samples_shape = (\r\n tensor_shape.TensorShape([self.evaluate(n)]).concatenate(\r\n normal.batch_shape))\r\n\r\n self.assertAllEqual(expected_samples_shape, samples.get_shape())\r\n self.assertAllEqual(expected_samples_shape, 
sample_values.shape)\r\n\r\n def testNormalFullyReparameterized(self):\r\n mu = constant_op.constant(4.0)\r\n sigma = constant_op.constant(3.0)\r\n with backprop.GradientTape() as tape:\r\n tape.watch(mu)\r\n tape.watch(sigma)\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n samples = normal.sample(100)\r\n grad_mu, grad_sigma = tape.gradient(samples, [mu, sigma])\r\n self.assertIsNotNone(grad_mu)\r\n self.assertIsNotNone(grad_sigma)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalSampleMultiDimensional(self):\r\n batch_size = 2\r\n mu = constant_op.constant([[3.0, -3.0]] * batch_size)\r\n sigma = constant_op.constant(\r\n [[math.sqrt(2.0), math.sqrt(3.0)]] * batch_size)\r\n mu_v = [3.0, -3.0]\r\n sigma_v = [np.sqrt(2.0), np.sqrt(3.0)]\r\n n = constant_op.constant(100000)\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n samples = normal.sample(n)\r\n sample_values = self.evaluate(samples)\r\n # Note that the standard error for the sample mean is ~ sigma / sqrt(n).\r\n # The sample variance similarly is dependent on sigma and n.\r\n # Thus, the tolerances below are very sensitive to number of samples\r\n # as well as the variances chosen.\r\n self.assertEqual(samples.get_shape(), (100000, batch_size, 2))\r\n self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-1)\r\n self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)\r\n self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-1)\r\n self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)\r\n\r\n expected_samples_shape = tensor_shape.TensorShape(\r\n [self.evaluate(n)]).concatenate(\r\n tensor_shape.TensorShape(\r\n self.evaluate(normal.batch_shape_tensor())))\r\n self.assertAllEqual(expected_samples_shape, samples.get_shape())\r\n self.assertAllEqual(expected_samples_shape, sample_values.shape)\r\n\r\n expected_samples_shape = (\r\n tensor_shape.TensorShape([self.evaluate(n)]).concatenate(\r\n normal.batch_shape))\r\n self.assertAllEqual(expected_samples_shape, samples.get_shape())\r\n self.assertAllEqual(expected_samples_shape, sample_values.shape)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNegativeSigmaFails(self):\r\n with self.assertRaisesOpError(\"Condition x > 0 did not hold\"):\r\n normal = normal_lib.Normal(\r\n loc=[1.], scale=[-5.], validate_args=True, name=\"G\")\r\n self.evaluate(normal.mean())\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalShape(self):\r\n mu = constant_op.constant([-3.0] * 5)\r\n sigma = constant_op.constant(11.0)\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n self.assertEqual(self.evaluate(normal.batch_shape_tensor()), [5])\r\n self.assertEqual(normal.batch_shape, tensor_shape.TensorShape([5]))\r\n self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])\r\n self.assertEqual(normal.event_shape, tensor_shape.TensorShape([]))\r\n\r\n @test_util.run_deprecated_v1\r\n def testNormalShapeWithPlaceholders(self):\r\n mu = array_ops.placeholder(dtype=dtypes.float32)\r\n sigma = array_ops.placeholder(dtype=dtypes.float32)\r\n normal = normal_lib.Normal(loc=mu, scale=sigma)\r\n\r\n with self.cached_session() as sess:\r\n # get_batch_shape should return an \"<unknown>\" tensor.\r\n self.assertEqual(normal.batch_shape, tensor_shape.TensorShape(None))\r\n self.assertEqual(normal.event_shape, ())\r\n self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])\r\n self.assertAllEqual(\r\n sess.run(normal.batch_shape_tensor(),\r\n feed_dict={mu: 5.0,\r\n sigma: 
[1.0, 2.0]}), [2])\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testNormalNormalKL(self):\r\n batch_size = 6\r\n mu_a = np.array([3.0] * batch_size)\r\n sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])\r\n mu_b = np.array([-3.0] * batch_size)\r\n sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])\r\n\r\n n_a = normal_lib.Normal(loc=mu_a, scale=sigma_a)\r\n n_b = normal_lib.Normal(loc=mu_b, scale=sigma_b)\r\n\r\n kl = kullback_leibler.kl_divergence(n_a, n_b)\r\n kl_val = self.evaluate(kl)\r\n\r\n kl_expected = ((mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (\r\n (sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))\r\n\r\n self.assertEqual(kl.get_shape(), (batch_size,))\r\n self.assertAllClose(kl_val, kl_expected)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nr\"\"\"Removes unneeded nodes from a GraphDef file.\r\n\r\nThis script is designed to help streamline models, by taking the input and\r\noutput nodes that will be used by an application and figuring out the smallest\r\nset of operations that are required to run for those arguments. 
The resulting\r\nminimal graph is then saved out.\r\n\r\nThe advantages of running this script are:\r\n - You may be able to shrink the file size.\r\n - Operations that are unsupported on your platform but still present can be\r\n safely removed.\r\nThe resulting graph may not be as flexible as the original though, since any\r\ninput nodes that weren't explicitly mentioned may not be accessible any more.\r\n\r\nAn example of command-line usage is:\r\nbazel build tensorflow/python/tools:strip_unused && \\\r\nbazel-bin/tensorflow/python/tools/strip_unused \\\r\n--input_graph=some_graph_def.pb \\\r\n--output_graph=/tmp/stripped_graph.pb \\\r\n--input_node_names=input0\r\n--output_node_names=softmax\r\n\r\nYou can also look at strip_unused_test.py for an example of how to use it.\r\n\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nimport sys\r\n\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.platform import app\r\nfrom tensorflow.python.tools import strip_unused_lib\r\n\r\nFLAGS = None\r\n\r\n\r\ndef main(unused_args):\r\n strip_unused_lib.strip_unused_from_files(FLAGS.input_graph,\r\n FLAGS.input_binary,\r\n FLAGS.output_graph,\r\n FLAGS.output_binary,\r\n FLAGS.input_node_names,\r\n FLAGS.output_node_names,\r\n FLAGS.placeholder_type_enum)\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\r\n parser.add_argument(\r\n '--input_graph',\r\n type=str,\r\n default='',\r\n help='TensorFlow \\'GraphDef\\' file to load.')\r\n parser.add_argument(\r\n '--input_binary',\r\n nargs='?',\r\n const=True,\r\n type='bool',\r\n default=False,\r\n help='Whether the input files are in binary format.')\r\n parser.add_argument(\r\n '--output_graph',\r\n type=str,\r\n default='',\r\n help='Output \\'GraphDef\\' file name.')\r\n parser.add_argument(\r\n '--output_binary',\r\n nargs='?',\r\n const=True,\r\n type='bool',\r\n default=True,\r\n help='Whether to write a binary format graph.')\r\n parser.add_argument(\r\n '--input_node_names',\r\n type=str,\r\n default='',\r\n help='The name of the input nodes, comma separated.')\r\n parser.add_argument(\r\n '--output_node_names',\r\n type=str,\r\n default='',\r\n help='The name of the output nodes, comma separated.')\r\n parser.add_argument(\r\n '--placeholder_type_enum',\r\n type=int,\r\n default=dtypes.float32.as_datatype_enum,\r\n help='The AttrValue enum to use for placeholders.')\r\n FLAGS, unparsed = parser.parse_known_args()\r\n app.run(main=main, argv=[sys.argv[0]] + unparsed)\r\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# =============================================================================\r\n\"\"\"Cloud TPU profiler client.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nimport os\r\nimport sys\r\nfrom absl import app\r\nfrom absl import flags\r\nfrom distutils.version import LooseVersion\r\n\r\nfrom tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver as resolver\r\nfrom tensorflow.python.eager import profiler_client\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import versions\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.tpu.profiler import version as profiler_version\r\n\r\nFLAGS = flags.FLAGS\r\n\r\n# Cloud TPU Cluster Resolvers\r\nflags.DEFINE_string(\r\n 'gcp_project', None,\r\n 'Project name for the Cloud TPU-enabled project. If not specified, we '\r\n 'will attempt to automatically detect the GCE project from metadata.')\r\nflags.DEFINE_string(\r\n 'tpu_zone',\r\n None,\r\n help='GCE zone where the Cloud TPU is located in. If not specified, we '\r\n 'will attempt to automatically detect the GCE project from metadata.')\r\nflags.DEFINE_string(\r\n 'tpu', None, 'Name of the Cloud TPU for Cluster Resolvers. You must '\r\n 'specify either this flag or --service_addr.')\r\n\r\n# Tool specific parameters\r\nflags.DEFINE_string(\r\n 'service_addr', None, 'Address of TPU profiler service e.g. '\r\n 'localhost:8466, you must specify either this flag or --tpu.')\r\nflags.DEFINE_string(\r\n 'workers_list', None, 'The list of worker TPUs that we are about to profile'\r\n ' e.g. 10.0.1.2, 10.0.1.3. You can specify this flag with --tpu or '\r\n '--service_addr to profile a subset of tpu nodes. You can also use only'\r\n '--tpu and leave this flag unspecified to profile all the tpus.')\r\nflags.DEFINE_string(\r\n 'logdir', None, 'Path of TensorBoard log directory e.g. /tmp/tb_log, '\r\n 'gs://tb_bucket')\r\nflags.DEFINE_integer('duration_ms', 0,\r\n 'Duration of tracing or monitoring in ms.')\r\nflags.DEFINE_integer(\r\n 'num_tracing_attempts', 3, 'Automatically retry N times when no trace '\r\n 'event is collected.')\r\nflags.DEFINE_boolean('include_dataset_ops', True,\r\n 'Set to false to profile longer TPU '\r\n 'device traces.')\r\n\r\n# Monitoring parameters\r\nflags.DEFINE_integer(\r\n 'monitoring_level', 0, 'Choose a monitoring level between '\r\n '1 and 2 to monitor your TPU job continuously. 
Level 2 is more verbose than'\r\n ' level 1 and shows more metrics.')\r\nflags.DEFINE_integer(\r\n 'num_queries', 100,\r\n 'This script will run monitoring for num_queries before it stops.')\r\nflags.DEFINE_boolean('display_timestamp', False,\r\n 'Set to true to display timestamp in monitoring results.')\r\n\r\n\r\ndef get_workers_list(cluster_resolver):\r\n \"\"\"Returns a comma separated list of TPU worker IP addresses.\r\n\r\n Gets cluster_spec from cluster_resolver. Use the worker's task indices to\r\n obtain and return a list of ip addresses.\r\n\r\n Args:\r\n cluster_resolver: TensorFlow TPUClusterResolver instance.\r\n\r\n Returns:\r\n A string of comma separated list of IP addresses. For example:\r\n '10.2.0.1,10.2.0.2,10.2.0.3,10.2.0.4'\r\n\r\n Raises:\r\n UnavailableError: cluster_resolver doesn't contain a valid cluster_spec.\r\n \"\"\"\r\n worker_job_name = 'worker'\r\n cluster_spec = cluster_resolver.cluster_spec()\r\n if not cluster_spec:\r\n raise errors.UnavailableError(\r\n 'None', 'None',\r\n 'Cluster spec not found, your client must run in GCE environment.')\r\n task_indices = cluster_spec.task_indices(worker_job_name)\r\n workers_list = [\r\n cluster_spec.task_address(worker_job_name, i).split(':')[0]\r\n for i in task_indices\r\n ]\r\n return ','.join(workers_list)\r\n\r\n\r\ndef monitoring_helper(service_addr, duration_ms, monitoring_level,\r\n display_timestamp, num_queries):\r\n \"\"\"Helper function to print monitoring results.\r\n\r\n Helper function to print monitoring results for num_queries times.\r\n\r\n Args:\r\n service_addr: Address of the TPU profiler service.\r\n duration_ms: Duration of one monitoring sample in milliseconds.\r\n monitoring_level: An integer between 1 and 2. Level 2 is more verbose than\r\n level 1 and shows more metrics.\r\n display_timestamp: Set to true to display timestamp in monitoring.\r\n num_queries: Number of monitoring samples to collect.\r\n \"\"\"\r\n if monitoring_level <= 0 or monitoring_level > 2:\r\n sys.exit('Please choose a monitoring level between 1 and 2.')\r\n\r\n for query in range(0, num_queries):\r\n res = profiler_client.monitor(service_addr, duration_ms, monitoring_level,\r\n display_timestamp)\r\n print('Cloud TPU Monitoring Results (Sample ', query, '):\\n\\n', res)\r\n\r\n\r\ndef run_main():\r\n app.run(main)\r\n\r\n\r\ndef main(unused_argv=None):\r\n logging.set_verbosity(logging.INFO)\r\n tf_version = versions.__version__\r\n print('TensorFlow version %s detected' % tf_version)\r\n print('Welcome to the Cloud TPU Profiler v%s' % profiler_version.__version__)\r\n\r\n if LooseVersion(tf_version) < LooseVersion('1.14.0'):\r\n sys.exit('You must install tensorflow >= 1.14.0 to use this plugin.')\r\n\r\n if not FLAGS.service_addr and not FLAGS.tpu:\r\n sys.exit('You must specify either --service_addr or --tpu.')\r\n\r\n tpu_cluster_resolver = None\r\n if FLAGS.service_addr:\r\n if FLAGS.tpu:\r\n logging.warn('Both --service_addr and --tpu are set. Ignoring '\r\n '--tpu and using --service_addr.')\r\n service_addr = FLAGS.service_addr\r\n else:\r\n try:\r\n tpu_cluster_resolver = (\r\n resolver.TPUClusterResolver([FLAGS.tpu],\r\n zone=FLAGS.tpu_zone,\r\n project=FLAGS.gcp_project))\r\n service_addr = tpu_cluster_resolver.get_master()\r\n except (ValueError, TypeError):\r\n sys.exit('Failed to find TPU %s in zone %s project %s. You may use '\r\n '--tpu_zone and --gcp_project to specify the zone and project of'\r\n ' your TPU.' 
% (FLAGS.tpu, FLAGS.tpu_zone, FLAGS.gcp_project))\r\n service_addr = service_addr.replace('grpc://', '').replace(':8470', ':8466')\r\n\r\n workers_list = ''\r\n if FLAGS.workers_list is not None:\r\n workers_list = FLAGS.workers_list\r\n elif tpu_cluster_resolver is not None:\r\n workers_list = get_workers_list(tpu_cluster_resolver)\r\n\r\n # If profiling duration was not set by user or set to a non-positive value,\r\n # we set it to a default value of 1000ms.\r\n duration_ms = FLAGS.duration_ms if FLAGS.duration_ms > 0 else 1000\r\n\r\n if FLAGS.monitoring_level > 0:\r\n print('Since monitoring level is provided, profile', service_addr, ' for ',\r\n FLAGS.duration_ms, ' ms and show metrics for ', FLAGS.num_queries,\r\n ' time(s).')\r\n monitoring_helper(service_addr, duration_ms, FLAGS.monitoring_level,\r\n FLAGS.display_timestamp, FLAGS.num_queries)\r\n else:\r\n if not FLAGS.logdir:\r\n sys.exit('You must specify either --logdir or --monitoring_level.')\r\n try:\r\n profiler_client.start_tracing(service_addr,\r\n os.path.expanduser(FLAGS.logdir),\r\n duration_ms, workers_list,\r\n FLAGS.include_dataset_ops,\r\n FLAGS.num_tracing_attempts)\r\n except errors.UnavailableError:\r\n sys.exit(0)\r\n\r\n\r\nif __name__ == '__main__':\r\n run_main()\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Quantile ops python wrappers.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nimport re\r\n\r\nfrom tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils\r\n# pylint: disable=unused-import\r\nfrom tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader\r\n# pylint: enable=unused-import\r\nfrom tensorflow.contrib.boosted_trees.python.ops import gen_quantile_ops\r\n\r\n# go/tf-wildcard-import\r\n# pylint: disable=wildcard-import,undefined-variable\r\nfrom tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops import *\r\n# pylint: enable=wildcard-import,undefined-variable\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import sparse_tensor\r\nfrom tensorflow.python.ops import resources\r\nfrom tensorflow.python.training import saver\r\nfrom tensorflow.python.training.tracking import tracking\r\n\r\n# Pattern to remove all non alpha numeric from a string.\r\n_PATTERN = re.compile(r\"[\\W_]+\")\r\n\r\n\r\nclass QuantileAccumulatorSaveable(saver.BaseSaverBuilder.SaveableObject):\r\n \"\"\"SaveableObject implementation for QuantileAccumulator.\"\"\"\r\n\r\n def __init__(self, resource_handle, create_op, name):\r\n self._resource_handle = resource_handle\r\n self._create_op = create_op\r\n stamp_token, state, are_buckets_ready, buckets = (\r\n gen_quantile_ops.quantile_accumulator_serialize(resource_handle))\r\n # slice_spec is useful for saving a slice from a 
variable.\r\n # It's not meaningful in quantile accumulator.\r\n slice_spec = \"\"\r\n def make_save_spec(tensor, suffix):\r\n return saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name + suffix)\r\n\r\n specs = [make_save_spec(stamp_token, \"_stamp\")]\r\n specs += [make_save_spec(state, \"_state\")]\r\n specs += [make_save_spec(are_buckets_ready, \"_are_buckets_ready\")]\r\n specs += [make_save_spec(buckets, \"buckets\")]\r\n super(QuantileAccumulatorSaveable, self).__init__(self._resource_handle,\r\n specs, name)\r\n\r\n def restore(self, restored_tensors, unused_restored_shapes):\r\n \"\"\"Restores the associated quantile accumulator from 'restored_tensors'.\r\n\r\n Args:\r\n restored_tensors: the tensors that were loaded from a checkpoint.\r\n unused_restored_shapes: the shapes this object should conform to after\r\n restore.\r\n\r\n Returns:\r\n The operation that restores the state of the quantile accumulator.\r\n \"\"\"\r\n # Read the restored tensors with the same order that were added to saving\r\n # spec.\r\n stamp_token = restored_tensors[:1]\r\n state = restored_tensors[1:2]\r\n are_buckets_ready = restored_tensors[2:3]\r\n buckets = restored_tensors[3]\r\n with ops.control_dependencies([self._create_op]):\r\n return gen_quantile_ops.quantile_accumulator_deserialize(\r\n self._resource_handle,\r\n stamp_token=stamp_token,\r\n stream_state=state,\r\n are_buckets_ready=are_buckets_ready,\r\n buckets=buckets)\r\n\r\n\r\nclass QuantileAccumulator(tracking.TrackableResource):\r\n \"\"\"A resource that allows distributed quantile computation.\"\"\"\r\n\r\n def __init__(self,\r\n init_stamp_token,\r\n epsilon,\r\n num_quantiles,\r\n max_elements=None,\r\n name=None,\r\n container=None,\r\n generate_quantiles=False):\r\n \"\"\"Creates a QuantileAccumulator object.\r\n\r\n Args:\r\n init_stamp_token: The initial value for the stamp token.\r\n epsilon: Error bound on the quantile computation.\r\n num_quantiles: Number of quantiles to produce from the final summary.\r\n max_elements: Maximum number of elements added to the accumulator.\r\n name: the name to save the accumulator under.\r\n container: An optional `string`. 
Defaults to `\"\"`\r\n generate_quantiles: Generate quantiles instead of approximate boundaries.\r\n If true, exactly `num_quantiles` will be produced in the final summary.\r\n \"\"\"\r\n self._init_stamp_token = init_stamp_token\r\n self._epsilon = epsilon\r\n self._num_quantiles = num_quantiles\r\n self._max_elements = max_elements\r\n self._container = container\r\n self._generate_quantiles = generate_quantiles\r\n super(QuantileAccumulator, self).__init__()\r\n\r\n name = _PATTERN.sub(\"\", name)\r\n with ops.name_scope(name, \"QuantileAccumulator\") as name:\r\n self._name = name\r\n self._resource_handle = self._create_resource()\r\n self._init_op = self._initialize()\r\n is_initialized_op = self.is_initialized()\r\n resources.register_resource(self.resource_handle, self._init_op,\r\n is_initialized_op)\r\n self._saveable = QuantileAccumulatorSaveable(self.resource_handle,\r\n self._init_op, name)\r\n ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)\r\n\r\n def _create_resource(self):\r\n return gen_quantile_ops.quantile_stream_resource_handle_op(\r\n container=self._container, shared_name=self._name, name=self._name)\r\n\r\n def _initialize(self):\r\n return gen_quantile_ops.create_quantile_accumulator(\r\n self.resource_handle,\r\n self._init_stamp_token,\r\n epsilon=self._epsilon,\r\n max_elements=self._max_elements,\r\n num_quantiles=self._num_quantiles,\r\n generate_quantiles=self._generate_quantiles)\r\n\r\n @property\r\n def initializer(self):\r\n if self._init_op is None:\r\n self._init_op = self._initialize()\r\n return self._init_op\r\n\r\n def is_initialized(self):\r\n return gen_quantile_ops.quantile_accumulator_is_initialized(\r\n self.resource_handle)\r\n\r\n def _gather_saveables_for_checkpoint(self):\r\n return {\"quantile_accumulator\", self.saveable}\r\n\r\n def get_buckets(self, stamp_token):\r\n \"\"\"Returns quantile buckets created during previous flush.\"\"\"\r\n are_buckets_ready, buckets = (\r\n gen_quantile_ops.quantile_accumulator_get_buckets(\r\n quantile_accumulator_handles=[self.resource_handle],\r\n stamp_token=stamp_token))\r\n return are_buckets_ready[0], buckets[0]\r\n\r\n def schedule_get_buckets(self):\r\n \"\"\"Returns a scheduled read of buckets created during previous flush.\"\"\"\r\n return batch_ops_utils.ScheduledStampedResourceOp(\r\n resource_handle=self.resource_handle,\r\n op=gen_quantile_ops.quantile_accumulator_get_buckets)\r\n\r\n def _make_summary(self, column, example_weights):\r\n if isinstance(column, sparse_tensor.SparseTensor):\r\n return gen_quantile_ops.make_quantile_summaries(\r\n dense_float_features=[],\r\n sparse_float_feature_indices=[column.indices],\r\n sparse_float_feature_values=[column.values],\r\n sparse_float_feature_shapes=[column.dense_shape],\r\n example_weights=example_weights,\r\n epsilon=self._epsilon / 2).sparse_summaries[0]\r\n else:\r\n return gen_quantile_ops.make_quantile_summaries(\r\n dense_float_features=[column],\r\n sparse_float_feature_indices=[],\r\n sparse_float_feature_values=[],\r\n sparse_float_feature_shapes=[],\r\n example_weights=example_weights,\r\n epsilon=self._epsilon / 2).dense_summaries[0]\r\n\r\n def add_summary(self, stamp_token, column, example_weights):\r\n \"\"\"Adds quantile summary to its stream in resource.\"\"\"\r\n summary = self._make_summary(column, example_weights)\r\n return gen_quantile_ops.quantile_accumulator_add_summaries(\r\n quantile_accumulator_handles=[self.resource_handle],\r\n stamp_token=stamp_token,\r\n summaries=[summary])\r\n\r\n def 
add_prebuilt_summary(self, stamp_token, summary):\r\n \"\"\"Adds quantile summary to its stream in resource.\"\"\"\r\n return gen_quantile_ops.quantile_accumulator_add_summaries(\r\n quantile_accumulator_handles=[self.resource_handle],\r\n stamp_token=stamp_token,\r\n summaries=[summary])\r\n\r\n def schedule_add_summary(self, stamp_token, column, example_weights):\r\n \"\"\"Schedules to add a quantile summary to its stream in resource.\"\"\"\r\n summary = self._make_summary(column, example_weights)\r\n return batch_ops_utils.ScheduledStampedResourceOp(\r\n op=gen_quantile_ops.quantile_accumulator_add_summaries,\r\n resource_handle=self.resource_handle,\r\n summaries=summary)\r\n\r\n def flush(self, stamp_token, next_stamp_token):\r\n \"\"\"Finalizes quantile summary stream and resets it for next iteration.\r\n\r\n Args:\r\n stamp_token: Expected current token.\r\n next_stamp_token: Next value for the token.\r\n Returns:\r\n The flush operation.\r\n \"\"\"\r\n return gen_quantile_ops.quantile_accumulator_flush(\r\n quantile_accumulator_handle=self.resource_handle,\r\n stamp_token=stamp_token,\r\n next_stamp_token=next_stamp_token)\r\n\r\n def flush_summary(self, stamp_token, next_stamp_token):\r\n \"\"\"Finalizes quantile summary stream and resets it for next iteration.\"\"\"\r\n result = gen_quantile_ops.quantile_accumulator_flush_summary(\r\n quantile_accumulator_handle=self.resource_handle,\r\n stamp_token=stamp_token,\r\n next_stamp_token=next_stamp_token)\r\n return result\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for GRU V2 layer.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport shutil\r\n\r\nfrom absl.testing import parameterized\r\nimport numpy as np\r\n\r\nfrom tensorflow.core.protobuf import config_pb2\r\nfrom tensorflow.core.protobuf import rewriter_config_pb2\r\nfrom tensorflow.python import keras\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.eager import backprop\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import random_seed\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.keras import keras_parameterized\r\nfrom tensorflow.python.keras import testing_utils\r\nfrom tensorflow.python.keras.layers import recurrent as rnn_v1\r\nfrom tensorflow.python.keras.layers import recurrent_v2 as rnn\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import gen_math_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import random_ops\r\nfrom tensorflow.python.platform import 
test\r\nfrom tensorflow.python.training import gradient_descent\r\n\r\n\r\n# Global config for grappler setting that is used for graph mode test.\r\n_rewrites = rewriter_config_pb2.RewriterConfig()\r\n_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON\r\n_rewrites.min_graph_nodes = -1\r\n_graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites)\r\n_config = config_pb2.ConfigProto(graph_options=_graph_options)\r\n\r\n\r\n@keras_parameterized.run_all_keras_modes(config=_config)\r\nclass GRUV2Test(keras_parameterized.TestCase):\r\n\r\n @parameterized.named_parameters(\r\n ('non_tan_activation', 'relu', 'sigmoid', 0, False, True, True),\r\n ('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True, True),\r\n ('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True, True),\r\n ('unroll', 'tanh', 'sigmoid', 0, True, True, True),\r\n ('not_use_bias', 'tanh', 'sigmoid', 0, False, False, True),\r\n ('not_reset_after', 'tanh', 'sigmoid', 0, False, True, False)\r\n )\r\n def test_could_use_defun_backend(self, activation, recurrent_activation,\r\n recurrent_dropout, unroll, use_bias,\r\n reset_after):\r\n layer = rnn.GRU(1,\r\n activation=activation,\r\n recurrent_activation=recurrent_activation,\r\n recurrent_dropout=recurrent_dropout,\r\n unroll=unroll,\r\n use_bias=use_bias,\r\n reset_after=reset_after)\r\n self.assertFalse(layer.could_use_cudnn)\r\n\r\n def test_keras_model_with_gru(self):\r\n input_shape = 10\r\n rnn_state_size = 8\r\n output_shape = 8\r\n timestep = 4\r\n batch = 100\r\n epoch = 10\r\n\r\n (x_train, y_train), _ = testing_utils.get_test_data(\r\n train_samples=batch,\r\n test_samples=0,\r\n input_shape=(timestep, input_shape),\r\n num_classes=output_shape)\r\n y_train = keras.utils.to_categorical(y_train, output_shape)\r\n\r\n layer = rnn.GRU(rnn_state_size)\r\n\r\n inputs = keras.layers.Input(\r\n shape=[timestep, input_shape], dtype=dtypes.float32)\r\n\r\n outputs = layer(inputs)\r\n model = keras.models.Model(inputs, outputs)\r\n model.compile('rmsprop', loss='mse')\r\n model.fit(x_train, y_train, epochs=epoch)\r\n model.evaluate(x_train, y_train)\r\n model.predict(x_train)\r\n\r\n def test_dynamic_behavior_GRU(self):\r\n num_samples = 2\r\n timesteps = 3\r\n embedding_dim = 4\r\n units = 2\r\n layer = rnn.GRU(units, input_shape=(None, embedding_dim))\r\n model = keras.models.Sequential()\r\n model.add(layer)\r\n model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse')\r\n x = np.random.random((num_samples, timesteps, embedding_dim))\r\n y = np.random.random((num_samples, units))\r\n model.train_on_batch(x, y)\r\n\r\n def test_stacking_GRU(self):\r\n inputs = np.random.random((2, 3, 4))\r\n targets = np.abs(np.random.random((2, 3, 5)))\r\n targets /= targets.sum(axis=-1, keepdims=True)\r\n model = keras.models.Sequential()\r\n model.add(rnn.GRU(10, return_sequences=True, unroll=False))\r\n model.add(rnn.GRU(5, return_sequences=True, unroll=False))\r\n model.compile(\r\n loss='categorical_crossentropy',\r\n optimizer=gradient_descent.GradientDescentOptimizer(0.01))\r\n model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)\r\n\r\n def test_from_config_GRU(self):\r\n layer_class = rnn.GRU\r\n for stateful in (False, True):\r\n l1 = layer_class(units=1, stateful=stateful)\r\n l2 = layer_class.from_config(l1.get_config())\r\n assert l1.get_config() == l2.get_config()\r\n\r\n @test_util.run_v2_only\r\n def test_gru_v2_feature_parity_with_canonical_gru(self):\r\n input_shape = 10\r\n rnn_state_size = 8\r\n timestep = 
4\r\n batch = 20\r\n\r\n (x_train, y_train), _ = testing_utils.get_test_data(\r\n train_samples=batch,\r\n test_samples=0,\r\n input_shape=(timestep, input_shape),\r\n num_classes=rnn_state_size,\r\n random_seed=random_seed.DEFAULT_GRAPH_SEED)\r\n y_train = keras.utils.to_categorical(y_train, rnn_state_size)\r\n # For the last batch item of the test data, we filter out the last\r\n # timestep to simulate the variable length sequence and masking test.\r\n x_train[-2:, -1, :] = 0.0\r\n y_train[-2:] = 0\r\n\r\n inputs = keras.layers.Input(\r\n shape=[timestep, input_shape], dtype=dtypes.float32)\r\n masked_input = keras.layers.Masking()(inputs)\r\n gru_layer = rnn_v1.GRU(rnn_state_size,\r\n recurrent_activation='sigmoid',\r\n reset_after=True)\r\n output = gru_layer(masked_input)\r\n gru_model = keras.models.Model(inputs, output)\r\n weights = gru_model.get_weights()\r\n y_1 = gru_model.predict(x_train)\r\n gru_model.compile('rmsprop', 'mse')\r\n gru_model.fit(x_train, y_train)\r\n y_2 = gru_model.predict(x_train)\r\n\r\n with test_util.device(use_gpu=True):\r\n cudnn_layer = rnn.GRU(rnn_state_size,\r\n recurrent_activation='sigmoid',\r\n reset_after=True)\r\n cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))\r\n cudnn_model.set_weights(weights)\r\n y_3 = cudnn_model.predict(x_train)\r\n cudnn_model.compile('rmsprop', 'mse')\r\n cudnn_model.fit(x_train, y_train)\r\n y_4 = cudnn_model.predict(x_train)\r\n\r\n self.assertAllClose(y_1, y_3, rtol=2e-5, atol=2e-5)\r\n self.assertAllClose(y_2, y_4, rtol=2e-5, atol=2e-5)\r\n\r\n @parameterized.named_parameters(\r\n # test_name, use_bias, bias_initializer, activation\r\n ('normal', True, 'zeros'),\r\n ('no_bias', False, 'zeros'),\r\n ('random_bias', True, 'random_uniform'),\r\n )\r\n def test_gru_v2_model_save_load(self, use_bias, bias_initializer):\r\n temp_dir = self.get_temp_dir()\r\n self.addCleanup(shutil.rmtree, temp_dir)\r\n h5_path = os.path.join(temp_dir, 'test.h5')\r\n\r\n batch = 10\r\n timestep = 3\r\n input_dim = 5\r\n units = 2\r\n\r\n x = np.random.random((batch, timestep, input_dim))\r\n\r\n def build_model():\r\n inputs = keras.layers.Input(\r\n shape=[timestep, input_dim], dtype=dtypes.float32)\r\n layer = rnn.GRU(\r\n units,\r\n use_bias=use_bias,\r\n bias_initializer=bias_initializer)\r\n output = layer(inputs)\r\n return keras.models.Model(inputs, output), layer\r\n\r\n model, layer = build_model()\r\n y_ref = model.predict(x)\r\n model.save_weights(h5_path)\r\n\r\n cloned_model, new_layer = build_model()\r\n cloned_model.load_weights(h5_path)\r\n y = cloned_model.predict(x)\r\n\r\n self.assertAllClose(y, y_ref)\r\n self.assertAllClose(layer.get_weights(), new_layer.get_weights())\r\n\r\n def test_gru_v2_output_on_multiple_kernel(self):\r\n input_shape = 10\r\n rnn_state_size = 8\r\n timestep = 4\r\n batch = 100\r\n\r\n x_train = np.random.random((batch, timestep, input_shape))\r\n\r\n inputs = keras.layers.Input(\r\n shape=[timestep, input_shape], dtype=dtypes.float32)\r\n with test_util.device(use_gpu=False):\r\n layer = rnn.GRU(rnn_state_size)\r\n output = layer(inputs)\r\n cpu_model = keras.models.Model(inputs, output)\r\n weights = cpu_model.get_weights()\r\n y_1 = cpu_model.predict(x_train)\r\n\r\n with test_util.device(use_gpu=True):\r\n layer = rnn.GRU(rnn_state_size)\r\n output = layer(inputs)\r\n gpu_model = keras.models.Model(inputs, output)\r\n gpu_model.set_weights(weights)\r\n y_2 = gpu_model.predict(x_train)\r\n\r\n # Note that CuDNN uses 'sigmoid' as activation, so the GRU V2 uses\r\n # 'sigmoid' 
as default. Construct the canonical GRU with sigmoid to achieve\r\n # the same output.\r\n with test_util.device(use_gpu=True):\r\n layer = rnn_v1.GRU(rnn_state_size,\r\n recurrent_activation='sigmoid',\r\n reset_after=True)\r\n output = layer(inputs)\r\n canonical_model = keras.models.Model(inputs, output)\r\n canonical_model.set_weights(weights)\r\n y_3 = canonical_model.predict(x_train)\r\n\r\n self.assertAllClose(y_1, y_2, rtol=1e-5, atol=1e-5)\r\n self.assertAllClose(y_2, y_3, rtol=1e-5, atol=1e-5)\r\n\r\n @parameterized.named_parameters(\r\n # test_name, time_major, go_backwards\r\n ('normal', False, False),\r\n ('time_major', True, False),\r\n ('go_backwards', False, True),\r\n ('both', True, True),\r\n )\r\n def test_time_major_and_go_backward(self, time_major, go_backwards):\r\n input_shape = 10\r\n rnn_state_size = 8\r\n timestep = 4\r\n batch = 100\r\n\r\n x_train = np.random.random((batch, timestep, input_shape))\r\n\r\n def build_model(layer_cls):\r\n inputs = keras.layers.Input(\r\n shape=[timestep, input_shape], dtype=dtypes.float32)\r\n layer = layer_cls(rnn_state_size,\r\n recurrent_activation='sigmoid',\r\n time_major=time_major,\r\n return_sequences=True,\r\n go_backwards=go_backwards,\r\n reset_after=True)\r\n if time_major:\r\n converted_input = keras.layers.Lambda(\r\n lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs)\r\n outputs = layer(converted_input)\r\n outputs = keras.layers.Lambda(\r\n lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs)\r\n else:\r\n outputs = layer(inputs)\r\n return keras.models.Model(inputs, outputs)\r\n\r\n gru_model = build_model(rnn_v1.GRU)\r\n y_ref = gru_model.predict(x_train)\r\n weights = gru_model.get_weights()\r\n\r\n gru_v2_model = build_model(rnn.GRU)\r\n gru_v2_model.set_weights(weights)\r\n y = gru_v2_model.predict(x_train)\r\n\r\n self.assertAllClose(y, y_ref)\r\n\r\n def test_with_masking_layer_GRU(self):\r\n layer_class = rnn.GRU\r\n inputs = np.random.random((2, 3, 4))\r\n targets = np.abs(np.random.random((2, 3, 5)))\r\n targets /= targets.sum(axis=-1, keepdims=True)\r\n model = keras.models.Sequential()\r\n model.add(keras.layers.Masking(input_shape=(3, 4)))\r\n model.add(layer_class(units=5, return_sequences=True, unroll=False))\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=gradient_descent.GradientDescentOptimizer(0.001))\r\n model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)\r\n\r\n def test_masking_with_stacking_GRU(self):\r\n inputs = np.random.random((2, 3, 4))\r\n targets = np.abs(np.random.random((2, 3, 5)))\r\n targets /= targets.sum(axis=-1, keepdims=True)\r\n model = keras.models.Sequential()\r\n model.add(keras.layers.Masking(input_shape=(3, 4)))\r\n model.add(rnn.GRU(10, return_sequences=True, unroll=False))\r\n model.add(rnn.GRU(5, return_sequences=True, unroll=False))\r\n model.compile(\r\n loss='categorical_crossentropy',\r\n optimizer=gradient_descent.GradientDescentOptimizer(0.01))\r\n model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)\r\n\r\n def test_return_sequences_GRU(self):\r\n num_samples = 2\r\n timesteps = 3\r\n embedding_dim = 4\r\n units = 2\r\n testing_utils.layer_test(\r\n rnn.GRU,\r\n kwargs={'units': units,\r\n 'return_sequences': True},\r\n input_shape=(num_samples, timesteps, embedding_dim))\r\n\r\n def test_float64_GRU(self):\r\n num_samples = 2\r\n timesteps = 3\r\n embedding_dim = 4\r\n units = 2\r\n testing_utils.layer_test(\r\n rnn.GRU,\r\n kwargs={'units': units,\r\n 'return_sequences': True,\r\n 'dtype': 'float64'},\r\n 
input_shape=(num_samples, timesteps, embedding_dim),\r\n input_dtype='float64')\r\n\r\n def test_return_states_GRU(self):\r\n layer_class = rnn.GRU\r\n x = np.random.random((2, 3, 4))\r\n y = np.abs(np.random.random((2, 5)))\r\n s = np.abs(np.random.random((2, 5)))\r\n inputs = keras.layers.Input(\r\n shape=[3, 4], dtype=dtypes.float32)\r\n masked = keras.layers.Masking()(inputs)\r\n outputs, states = layer_class(units=5, return_state=True)(masked)\r\n\r\n model = keras.models.Model(inputs, [outputs, states])\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=gradient_descent.GradientDescentOptimizer(0.001))\r\n model.fit(x, [y, s], epochs=1, batch_size=2, verbose=1)\r\n\r\n def test_dropout_GRU(self):\r\n num_samples = 2\r\n timesteps = 3\r\n embedding_dim = 4\r\n units = 2\r\n testing_utils.layer_test(\r\n rnn.GRU,\r\n kwargs={'units': units,\r\n 'dropout': 0.1,\r\n 'recurrent_dropout': 0.1},\r\n input_shape=(num_samples, timesteps, embedding_dim))\r\n\r\n def test_constraints_GRU(self):\r\n embedding_dim = 4\r\n layer_class = rnn.GRU\r\n k_constraint = keras.constraints.max_norm(0.01)\r\n r_constraint = keras.constraints.max_norm(0.01)\r\n b_constraint = keras.constraints.max_norm(0.01)\r\n layer = layer_class(\r\n 5,\r\n return_sequences=False,\r\n weights=None,\r\n input_shape=(None, embedding_dim),\r\n kernel_constraint=k_constraint,\r\n recurrent_constraint=r_constraint,\r\n bias_constraint=b_constraint)\r\n layer.build((None, None, embedding_dim))\r\n self.assertEqual(layer.cell.kernel.constraint, k_constraint)\r\n self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)\r\n self.assertEqual(layer.cell.bias.constraint, b_constraint)\r\n\r\n @parameterized.parameters([0, 1, 2])\r\n def test_implementation_mode_GRU(self, implementation_mode):\r\n num_samples = 2\r\n timesteps = 3\r\n embedding_dim = 4\r\n units = 2\r\n testing_utils.layer_test(\r\n rnn.GRU,\r\n kwargs={'units': units,\r\n 'implementation': implementation_mode},\r\n input_shape=(num_samples, timesteps, embedding_dim))\r\n\r\n def test_regularizers_GRU(self):\r\n embedding_dim = 4\r\n layer_class = rnn.GRU\r\n layer = layer_class(\r\n 5,\r\n return_sequences=False,\r\n weights=None,\r\n input_shape=(None, embedding_dim),\r\n kernel_regularizer=keras.regularizers.l1(0.01),\r\n recurrent_regularizer=keras.regularizers.l1(0.01),\r\n bias_regularizer='l2',\r\n activity_regularizer='l1')\r\n layer.build((None, None, 2))\r\n self.assertEqual(len(layer.losses), 3)\r\n\r\n x = keras.backend.variable(np.ones((2, 3, 2)))\r\n layer(x)\r\n if context.executing_eagerly():\r\n self.assertEqual(len(layer.losses), 4)\r\n else:\r\n self.assertEqual(len(layer.get_losses_for(x)), 1)\r\n\r\n def test_statefulness_GRU(self):\r\n num_samples = 2\r\n timesteps = 3\r\n embedding_dim = 4\r\n units = 2\r\n layer_class = rnn.GRU\r\n model = keras.models.Sequential()\r\n model.add(\r\n keras.layers.Embedding(\r\n 4,\r\n embedding_dim,\r\n mask_zero=True,\r\n input_length=timesteps,\r\n batch_input_shape=(num_samples, timesteps)))\r\n layer = layer_class(\r\n units, return_sequences=False, stateful=True, weights=None)\r\n model.add(layer)\r\n model.compile(\r\n optimizer=gradient_descent.GradientDescentOptimizer(0.01),\r\n loss='mse',\r\n run_eagerly=testing_utils.should_run_eagerly(),\r\n experimental_run_tf_function=testing_utils.should_run_tf_function())\r\n out1 = model.predict(np.ones((num_samples, timesteps)))\r\n self.assertEqual(out1.shape, (num_samples, units))\r\n\r\n # train once so that the states change\r\n 
model.train_on_batch(\r\n np.ones((num_samples, timesteps)), np.ones((num_samples, units)))\r\n out2 = model.predict(np.ones((num_samples, timesteps)))\r\n\r\n # if the state is not reset, output should be different\r\n self.assertNotEqual(out1.max(), out2.max())\r\n\r\n # check that output changes after states are reset\r\n # (even though the model itself didn't change)\r\n layer.reset_states()\r\n out3 = model.predict(np.ones((num_samples, timesteps)))\r\n self.assertNotEqual(out2.max(), out3.max())\r\n\r\n # check that container-level reset_states() works\r\n model.reset_states()\r\n out4 = model.predict(np.ones((num_samples, timesteps)))\r\n np.testing.assert_allclose(out3, out4, atol=1e-5)\r\n\r\n # check that the call to `predict` updated the states\r\n out5 = model.predict(np.ones((num_samples, timesteps)))\r\n self.assertNotEqual(out4.max(), out5.max())\r\n\r\n # Check masking\r\n layer.reset_states()\r\n\r\n left_padded_input = np.ones((num_samples, timesteps))\r\n left_padded_input[0, :1] = 0\r\n left_padded_input[1, :2] = 0\r\n out6 = model.predict(left_padded_input)\r\n\r\n layer.reset_states()\r\n\r\n right_padded_input = np.ones((num_samples, timesteps))\r\n right_padded_input[0, -1:] = 0\r\n right_padded_input[1, -2:] = 0\r\n out7 = model.predict(right_padded_input)\r\n\r\n layer.reset_states()\r\n\r\n mix_padded_input = np.ones((num_samples, timesteps))\r\n mix_padded_input[0, 1] = 0\r\n mix_padded_input[1, 0] = 0\r\n mix_padded_input[1, 2] = 0\r\n out8 = model.predict(mix_padded_input)\r\n\r\n self.assertAllClose(out7, out6, atol=1e-5)\r\n self.assertAllClose(out8, out7, atol=1e-5)\r\n\r\n def test_stateful_GRU_training(self):\r\n # See b/123587692 for more context.\r\n vocab_size = 20\r\n embedding_dim = 10\r\n batch_size = 8\r\n timestep = 12\r\n units = 5\r\n x = np.random.randint(0, vocab_size, size=(batch_size, timestep))\r\n y = np.random.randint(0, vocab_size, size=(batch_size, timestep))\r\n\r\n model = keras.Sequential([\r\n keras.layers.Embedding(vocab_size, embedding_dim,\r\n batch_input_shape=[batch_size, timestep]),\r\n rnn.GRU(units, return_sequences=True, stateful=True),\r\n keras.layers.Dense(vocab_size)\r\n ])\r\n model.compile(\r\n optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n run_eagerly=testing_utils.should_run_eagerly(),\r\n experimental_run_tf_function=testing_utils.should_run_tf_function())\r\n model.fit(x, y, epochs=1, shuffle=False)\r\n\r\n @test_util.run_v2_only\r\n def test_explicit_device_with_go_backward_and_mask(self):\r\n batch_size = 8\r\n timestep = 7\r\n masksteps = 5\r\n units = 4\r\n\r\n inputs = np.random.randn(batch_size, timestep, units).astype(np.float32)\r\n mask = np.ones((batch_size, timestep)).astype(np.bool)\r\n mask[:, masksteps:] = 0\r\n\r\n # Test for V1 behavior.\r\n lstm_v1 = rnn_v1.GRU(units, return_sequences=True, go_backwards=True)\r\n with test_util.device(use_gpu=True):\r\n outputs_masked_v1 = lstm_v1(inputs, mask=constant_op.constant(mask))\r\n outputs_trimmed_v1 = lstm_v1(inputs[:, :masksteps])\r\n self.assertAllClose(outputs_masked_v1[:, -masksteps:], outputs_trimmed_v1)\r\n\r\n # Test for V2 behavior.\r\n lstm = rnn.GRU(units, return_sequences=True, go_backwards=True)\r\n with test_util.device(use_gpu=True):\r\n outputs_masked = lstm(inputs, mask=constant_op.constant(mask))\r\n outputs_trimmed = lstm(inputs[:, :masksteps])\r\n self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed)\r\n\r\n @test_util.run_deprecated_v1\r\n def test_v1_session_behavior(self):\r\n # See b/139132348 for 
more details.\r\n x = np.random.uniform(size=(100, 4, 8))\r\n y = np.random.uniform(size=(100, 1))\r\n dataset = dataset_ops.Dataset.from_tensor_slices(\r\n (x, y)).shuffle(100).batch(32)\r\n\r\n inp = keras.layers.Input(shape=(4, 8))\r\n layer = rnn.GRU(1)(inp)\r\n layer = keras.layers.Dense(1)(layer)\r\n\r\n model = keras.models.Model(inp, layer)\r\n\r\n model.compile(loss='mse', optimizer='sgd')\r\n model.fit(dataset)\r\n\r\n\r\nclass GRULayerGradientTapeTest(test.TestCase):\r\n\r\n @test_util.run_in_graph_and_eager_modes(config=_config)\r\n def test_in_tape(self):\r\n if not context.executing_eagerly():\r\n self.skipTest('bloo')\r\n time_steps = 10\r\n embedding_size = 11\r\n gru_unit_size = 12\r\n\r\n gru = rnn.GRU(gru_unit_size,\r\n return_sequences=True,\r\n return_state=True,\r\n recurrent_activation='sigmoid',\r\n recurrent_initializer='glorot_uniform')\r\n\r\n x = random_ops.random_uniform([1, time_steps, embedding_size])\r\n y = random_ops.random_uniform([1, gru_unit_size])\r\n\r\n with backprop.GradientTape() as tape:\r\n hidden_state = array_ops.zeros([1, gru_unit_size], dtype=dtypes.float32)\r\n _, state = gru(x, initial_state=hidden_state)\r\n\r\n loss = math_ops.reduce_mean(math_ops.square(state - y))\r\n\r\n tape.gradient(loss, gru.variables)\r\n\r\n\r\n@keras_parameterized.run_all_keras_modes(config=_config)\r\nclass GRUGraphRewriteTest(keras_parameterized.TestCase):\r\n\r\n input_shape = 10\r\n output_shape = 8\r\n rnn_state_size = 8\r\n timestep = 4\r\n batch = 100\r\n epoch = 1\r\n\r\n def _test_runtime_with_model(self, model):\r\n (x_train, y_train), _ = testing_utils.get_test_data(\r\n train_samples=self.batch,\r\n test_samples=0,\r\n input_shape=(self.timestep, self.input_shape),\r\n num_classes=self.output_shape)\r\n y_train = keras.utils.to_categorical(y_train, self.output_shape)\r\n\r\n model.compile(\r\n optimizer='sgd',\r\n loss=['categorical_crossentropy', None],\r\n experimental_run_tf_function=testing_utils.should_run_tf_function())\r\n\r\n existing_loss = 0\r\n for _ in range(self.epoch):\r\n history = model.fit(x_train, y_train)\r\n loss_value = history.history['loss'][0]\r\n\r\n self.assertNotEqual(existing_loss, loss_value)\r\n existing_loss = loss_value\r\n\r\n _, runtime_value = model.predict(x_train)\r\n if test.is_gpu_available():\r\n self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)\r\n else:\r\n self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)\r\n\r\n @test_util.run_v2_only\r\n def test_GRU_runtime(self):\r\n layer = rnn.GRU(self.rnn_state_size, return_runtime=True)\r\n\r\n inputs = keras.layers.Input(\r\n shape=[self.timestep, self.input_shape], dtype=dtypes.float32)\r\n\r\n outputs, runtime = layer(inputs)\r\n # Expand the runtime so that it is a 1D tensor instead of scalar.\r\n # TF model does not work with scalar model output, specially during\r\n # aggregation.\r\n runtime = keras.layers.Lambda(\r\n lambda x: array_ops.expand_dims(x, axis=-1))(runtime)\r\n model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])\r\n self._test_runtime_with_model(model)\r\n\r\n @test_util.run_v2_only\r\n def test_GRU_runtime_with_mask(self):\r\n # Masking will affect which backend is selected based on whether the mask\r\n # is strictly right padded.\r\n layer = rnn.GRU(self.rnn_state_size, return_runtime=True)\r\n\r\n inputs = keras.layers.Input(\r\n shape=[self.timestep, self.input_shape], dtype=dtypes.float32)\r\n masked_inputs = keras.layers.Masking()(inputs)\r\n\r\n outputs, runtime = layer(masked_inputs)\r\n # Expand the runtime so that 
it is a 1D tensor instead of scalar.\r\n # TF model does not work with scalar model output, specially during\r\n # aggregation.\r\n runtime = keras.layers.Lambda(\r\n lambda x: array_ops.expand_dims(x, axis=-1))(runtime)\r\n model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])\r\n\r\n (x_train, y_train), _ = testing_utils.get_test_data(\r\n train_samples=self.batch,\r\n test_samples=0,\r\n input_shape=(self.timestep, self.input_shape),\r\n num_classes=self.output_shape)\r\n y_train = keras.utils.to_categorical(y_train, self.output_shape)\r\n\r\n model.compile(\r\n optimizer='sgd',\r\n loss=['categorical_crossentropy', None],\r\n run_eagerly=testing_utils.should_run_eagerly(),\r\n experimental_run_tf_function=testing_utils.should_run_tf_function())\r\n\r\n model.fit(x_train, y_train)\r\n\r\n # Verify unpadded data.\r\n _, runtime_value = model.predict(x_train)\r\n if test.is_gpu_available():\r\n self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)\r\n else:\r\n self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)\r\n\r\n # Update x/y to be right padded by setting the last timestep to 0\r\n x_train[:, -1, :] = 0\r\n y_train[:, -1] = 0\r\n _, runtime_value = model.predict(x_train)\r\n if test.is_gpu_available():\r\n self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)\r\n else:\r\n self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)\r\n\r\n # Further update x/y to be mix padded (masks in the middle), and verify\r\n # only cpu kernel can be selected.\r\n x_train[:, -3, :] = 0\r\n y_train[:, -3] = 0\r\n _, runtime_value = model.predict(x_train)\r\n self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)\r\n\r\n @test_util.run_v2_only\r\n def test_GRU_runtime_with_cond(self):\r\n # This test is to demonstrate the graph rewrite of grappler plugin under\r\n # the condition that the function returns different number of internal\r\n # states.\r\n layer = rnn.GRU(self.rnn_state_size, return_runtime=True)\r\n\r\n inputs = keras.layers.Input(\r\n shape=[self.timestep, self.input_shape], dtype=dtypes.float32)\r\n\r\n zeros = array_ops.zeros([self.batch, self.output_shape])\r\n dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)\r\n a = constant_op.constant(0)\r\n b = constant_op.constant(1)\r\n # Will always run the GRU layer.\r\n outputs, runtime = control_flow_ops.cond(\r\n gen_math_ops.less(a, b),\r\n lambda: layer(inputs),\r\n lambda: (zeros, dummy_runtime))\r\n\r\n # Expand the runtime so that it is a 1D tensor instead of scalar.\r\n # TF model does not work with scalar model output, specially during\r\n # aggregation.\r\n runtime = keras.layers.Lambda(\r\n lambda x: array_ops.expand_dims(x, axis=-1))(runtime)\r\n model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])\r\n self._test_runtime_with_model(model)\r\n\r\n\r\nif __name__ == '__main__':\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. 
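# A minimal, self-contained sketch of the end-to-end GRU usage that the tests
# above exercise, written against the public tf.keras API (an assumption; the
# tests themselves import TensorFlow-internal modules such as recurrent_v2).
# With the default tanh/sigmoid activations, no recurrent dropout and
# reset_after=True, the layer can dispatch to the cuDNN kernel on GPU, which
# is the behaviour the feature-parity and runtime tests check.
import numpy as np
import tensorflow as tf

batch, timestep, input_dim, units = 8, 4, 10, 8
x_train = np.random.random((batch, timestep, input_dim)).astype(np.float32)
y_train = np.random.random((batch, units)).astype(np.float32)

inputs = tf.keras.layers.Input(shape=(timestep, input_dim))
outputs = tf.keras.layers.GRU(units, reset_after=True)(inputs)
model = tf.keras.Model(inputs, outputs)

model.compile(optimizer='rmsprop', loss='mse')
model.fit(x_train, y_train, epochs=1, verbose=0)
print(model.predict(x_train).shape)  # (8, 8)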
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Operations for generating and loading vocab remappings.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.training import checkpoint_ops\r\n\r\n\r\n# pylint: disable=protected-access,line-too-long\r\nload_and_remap_matrix_initializer = checkpoint_ops._load_and_remap_matrix_initializer\r\n# pylint: enable=line-too-long\r\nload_embedding_initializer = checkpoint_ops._load_embedding_initializer\r\n# pylint: enable=protected-access\r\n\r\n\r\ndef load_linear_multiclass_bias_initializer(ckpt_path,\r\n bias_tensor_name,\r\n new_class_vocab_size,\r\n old_class_vocab_file,\r\n new_class_vocab_file,\r\n num_class_oov_buckets=0,\r\n initializer=None,\r\n max_rows_in_memory=-1):\r\n \"\"\"Loads pre-trained multi-class biases for linear models from checkpoint.\r\n\r\n Wrapper around `load_and_remap_matrix_initializer()` specialized for loading\r\n multi-class bias and remapping according to the provided vocab files. See docs\r\n for `load_and_remap_matrix_initializer()` for more details. In this case, the\r\n provided row_vocab is the class vocabulary, and the expected shape is\r\n `[new_class_vocab_size, 1]`.\r\n\r\n Args:\r\n ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)\r\n from which the old matrix `Tensor` will be loaded.\r\n bias_tensor_name: Tensor name to load from in the checkpoints.\r\n new_class_vocab_size: Number of entries in the new class vocab.\r\n old_class_vocab_file: A scalar `Tensor` of type `string` containing the\r\n path to the old class vocabulary file.\r\n new_class_vocab_file: A scalar `Tensor` of type `string` containing the\r\n path to the new class vocabulary file.\r\n num_class_oov_buckets: `int` specifying the number of out-of-vocabulary\r\n buckets to use for the classes. Must be >= 0.\r\n initializer: Initializer function that accepts a 1-D tensor as the arg to\r\n specify the shape of the returned tensor. If `None`, defaults to using\r\n `zeros_initializer()`.\r\n max_rows_in_memory: `int` specifying the maximum number of rows to load from\r\n the checkpoint at once. If less than or equal to 0, the entire matrix will\r\n be loaded into memory. 
Setting this arg trades increased disk reads for\r\n lower memory usage.\r\n\r\n Returns:\r\n A variable initializer function.\r\n \"\"\"\r\n # Linear multi-class biases should be zero-initialized.\r\n if initializer is None:\r\n initializer = init_ops.zeros_initializer()\r\n\r\n return load_and_remap_matrix_initializer(\r\n ckpt_path=ckpt_path,\r\n old_tensor_name=bias_tensor_name,\r\n new_row_vocab_size=new_class_vocab_size,\r\n new_col_vocab_size=1,\r\n old_row_vocab_file=old_class_vocab_file,\r\n new_row_vocab_file=new_class_vocab_file,\r\n old_col_vocab_file=None,\r\n new_col_vocab_file=None,\r\n num_row_oov_buckets=num_class_oov_buckets,\r\n num_col_oov_buckets=0,\r\n initializer=initializer,\r\n max_rows_in_memory=max_rows_in_memory)\r\n\r\n\r\ndef load_variable_slot_initializer(ckpt_path,\r\n old_tensor_name,\r\n primary_partition_info,\r\n new_row_vocab_size,\r\n new_col_vocab_size,\r\n old_row_vocab_file=None,\r\n new_row_vocab_file=None,\r\n old_col_vocab_file=None,\r\n new_col_vocab_file=None,\r\n num_row_oov_buckets=0,\r\n num_col_oov_buckets=0,\r\n initializer=None,\r\n max_rows_in_memory=-1):\r\n \"\"\"Loads pre-trained multi-class slots for linear models from checkpoint.\r\n\r\n Wrapper around `load_and_remap_matrix_initializer()` specialized for loading\r\n multi-class slots (such as optimizer accumulators) and remapping them\r\n according to the provided vocab files. See docs for\r\n `load_and_remap_matrix_initializer()` for more details. Takes in a\r\n `variable_scope._PartitionInfo` representing the slot's primary `Variable`'s\r\n partitioning. This is necessary since accumulator `Variable` creation ignores\r\n primary scoping and partitioning information.\r\n\r\n Args:\r\n ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)\r\n from which the old matrix `Tensor` will be loaded.\r\n old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.\r\n primary_partition_info: A `variable_scope._PartitionInfo` containing this\r\n slot's primary `Variable`'s partitioning information. This is used to\r\n calculate the offset and override the partition_info passed to the call to\r\n _initialize.\r\n new_row_vocab_size: `int` specifying the number of entries in\r\n `new_row_vocab_file`. If no row remapping is needed (no row vocab\r\n provided), this should be equal to the number of rows to load from the old\r\n matrix (which can theoretically be smaller than the number of rows in the\r\n old matrix).\r\n new_col_vocab_size: `int` specifying the number of entries in\r\n `new_col_vocab_file`. If no column remapping is needed (no column vocab\r\n provided), this should be equal to the number of columns in the old\r\n matrix.\r\n old_row_vocab_file: A scalar `Tensor` of type `string` containing the\r\n path to the old row vocabulary file. Can be None, which represents no\r\n remapping on the row axis.\r\n new_row_vocab_file: A scalar `Tensor` of type `string` containing the path\r\n to the new row vocabulary file. Can be None, which represents no remapping\r\n on the row axis.\r\n old_col_vocab_file: A scalar `Tensor` of type `string` containing the\r\n path to the old column vocabulary file. Can be None, which represents no\r\n remapping on the column axis.\r\n new_col_vocab_file: A scalar `Tensor` of type `string` containing the path\r\n to the new column vocabulary file. Can be None, which represents no\r\n remapping on the column axis.\r\n num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows\r\n to append. 
Must be >= 0.\r\n num_col_oov_buckets: `int` specifying the number of out-of-vocabulary\r\n columns to append. Must be >= 0.\r\n initializer: Initializer function to initialize missing values. Accepts a\r\n 1-D tensor as the arg to specify the shape of the returned tensor. If\r\n `None`, defaults to using `zeros_initializer()`.\r\n max_rows_in_memory: `int` specifying the maximum number of rows to load from\r\n the checkpoint at once. If less than or equal to 0, the entire matrix will\r\n be loaded into memory. Setting this arg trades increased disk reads for\r\n lower memory usage.\r\n\r\n Returns:\r\n A variable initializer function that should be used to initialize a\r\n (potentially partitioned) `Variable` whose complete shape is\r\n `[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +\r\n num_col_oov_buckets]`.\r\n\r\n Raises:\r\n TypeError: If `initializer` is specified but not callable.\r\n \"\"\"\r\n initializer_fn = load_and_remap_matrix_initializer(\r\n ckpt_path=ckpt_path,\r\n old_tensor_name=old_tensor_name,\r\n new_row_vocab_size=new_row_vocab_size,\r\n new_col_vocab_size=new_col_vocab_size,\r\n old_row_vocab_file=old_row_vocab_file,\r\n new_row_vocab_file=new_row_vocab_file,\r\n old_col_vocab_file=old_col_vocab_file,\r\n new_col_vocab_file=new_col_vocab_file,\r\n num_row_oov_buckets=num_row_oov_buckets,\r\n num_col_oov_buckets=num_col_oov_buckets,\r\n initializer=initializer,\r\n max_rows_in_memory=max_rows_in_memory)\r\n\r\n def _initializer(shape, dtype=dtypes.float32, partition_info=None):\r\n del partition_info # Unused by this override.\r\n return initializer_fn(shape, dtype, partition_info=primary_partition_info)\r\n\r\n return _initializer\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n# pylint: disable=g-short-docstring-punctuation\r\n\"\"\"Asserts and Boolean Checks.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport numpy as np\r\n\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import sparse_tensor\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.framework import tensor_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.util import compat\r\nfrom tensorflow.python.util import deprecation\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\nNUMERIC_TYPES = frozenset(\r\n [dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16, dtypes.int32,\r\n dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32, 
dtypes.quint8,\r\n dtypes.complex64])\r\n\r\n__all__ = [\r\n 'assert_negative',\r\n 'assert_positive',\r\n 'assert_proper_iterable',\r\n 'assert_non_negative',\r\n 'assert_non_positive',\r\n 'assert_equal',\r\n 'assert_none_equal',\r\n 'assert_near',\r\n 'assert_integer',\r\n 'assert_less',\r\n 'assert_less_equal',\r\n 'assert_greater',\r\n 'assert_greater_equal',\r\n 'assert_rank',\r\n 'assert_rank_at_least',\r\n 'assert_rank_in',\r\n 'assert_same_float_dtype',\r\n 'assert_scalar',\r\n 'assert_type',\r\n 'assert_shapes',\r\n 'is_non_decreasing',\r\n 'is_numeric_tensor',\r\n 'is_strictly_increasing',\r\n]\r\n\r\n\r\ndef _maybe_constant_value_string(t):\r\n if not isinstance(t, ops.Tensor):\r\n return str(t)\r\n const_t = tensor_util.constant_value(t)\r\n if const_t is not None:\r\n return str(const_t)\r\n return t\r\n\r\n\r\ndef _assert_static(condition, data):\r\n \"\"\"Raises a InvalidArgumentError with as much information as possible.\"\"\"\r\n if not condition:\r\n data_static = [_maybe_constant_value_string(x) for x in data]\r\n raise errors.InvalidArgumentError(node_def=None, op=None,\r\n message='\\n'.join(data_static))\r\n\r\n\r\ndef _shape_and_dtype_str(tensor):\r\n \"\"\"Returns a string containing tensor's shape and dtype.\"\"\"\r\n return 'shape=%s dtype=%s' % (tensor.shape, tensor.dtype.name)\r\n\r\n\r\n@tf_export(\r\n 'debugging.assert_proper_iterable',\r\n v1=['debugging.assert_proper_iterable', 'assert_proper_iterable'])\r\[email protected]_endpoints('assert_proper_iterable')\r\ndef assert_proper_iterable(values):\r\n \"\"\"Static assert that values is a \"proper\" iterable.\r\n\r\n `Ops` that expect iterables of `Tensor` can call this to validate input.\r\n Useful since `Tensor`, `ndarray`, byte/text type are all iterables themselves.\r\n\r\n Args:\r\n values: Object to be checked.\r\n\r\n Raises:\r\n TypeError: If `values` is not iterable or is one of\r\n `Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.\r\n \"\"\"\r\n unintentional_iterables = (\r\n (ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)\r\n + compat.bytes_or_text_types\r\n )\r\n if isinstance(values, unintentional_iterables):\r\n raise TypeError(\r\n 'Expected argument \"values\" to be a \"proper\" iterable. Found: %s' %\r\n type(values))\r\n\r\n if not hasattr(values, '__iter__'):\r\n raise TypeError(\r\n 'Expected argument \"values\" to be iterable. Found: %s' % type(values))\r\n\r\n\r\n@tf_export('debugging.assert_negative', v1=[])\r\ndef assert_negative_v2(x, message=None, summarize=None, name=None):\r\n \"\"\"Assert the condition `x < 0` holds element-wise.\r\n\r\n This Op checks that `x[i] < 0` holds for every element of `x`. If `x` is\r\n empty, this is trivially satisfied.\r\n\r\n If `x` is not negative everywhere, `message`, as well as the first `summarize`\r\n entries of `x` are printed, and `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to \"assert_negative\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` is all negative. This can be\r\n used with `tf.control_dependencies` inside of `tf.function`s to block\r\n followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x[i] < 0` is False. 
The check can be performed immediately during eager\r\n execution or if `x` is statically known.\r\n \"\"\"\r\n return assert_negative(x=x, message=message, summarize=summarize, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_negative', 'assert_negative'])\r\[email protected]_endpoints('assert_negative')\r\ndef assert_negative(x, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x < 0` holds element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_negative(x)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n Negative means, for every element `x[i]` of `x`, we have `x[i] < 0`.\r\n If `x` is empty this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_negative\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` is all negative.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_negative', [x, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n if data is None:\r\n if context.executing_eagerly():\r\n name = _shape_and_dtype_str(x)\r\n else:\r\n name = x.name\r\n data = [\r\n message,\r\n 'Condition x < 0 did not hold element-wise:',\r\n 'x (%s) = ' % name, x]\r\n zero = ops.convert_to_tensor(0, dtype=x.dtype)\r\n return assert_less(x, zero, data=data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_positive', v1=[])\r\ndef assert_positive_v2(x, message=None, summarize=None, name=None):\r\n \"\"\"Assert the condition `x > 0` holds element-wise.\r\n\r\n This Op checks that `x[i] > 0` holds for every element of `x`. If `x` is\r\n empty, this is trivially satisfied.\r\n\r\n If `x` is not positive everywhere, `message`, as well as the first `summarize`\r\n entries of `x` are printed, and `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to \"assert_positive\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` is all positive. This can be\r\n used with `tf.control_dependencies` inside of `tf.function`s to block\r\n followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x[i] > 0` is False. 
The check can be performed immediately during eager\r\n execution or if `x` is statically known.\r\n \"\"\"\r\n return assert_positive(x=x, summarize=summarize, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_positive', 'assert_positive'])\r\[email protected]_endpoints('assert_positive')\r\ndef assert_positive(x, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x > 0` holds element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_positive(x)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n Positive means, for every element `x[i]` of `x`, we have `x[i] > 0`.\r\n If `x` is empty this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_positive\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` is all positive.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_positive', [x, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n if data is None:\r\n if context.executing_eagerly():\r\n name = _shape_and_dtype_str(x)\r\n else:\r\n name = x.name\r\n data = [\r\n message, 'Condition x > 0 did not hold element-wise:',\r\n 'x (%s) = ' % name, x]\r\n zero = ops.convert_to_tensor(0, dtype=x.dtype)\r\n return assert_less(zero, x, data=data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_non_negative', v1=[])\r\ndef assert_non_negative_v2(x, message=None, summarize=None, name=None):\r\n \"\"\"Assert the condition `x >= 0` holds element-wise.\r\n\r\n This Op checks that `x[i] >= 0` holds for every element of `x`. If `x` is\r\n empty, this is trivially satisfied.\r\n\r\n If `x` is not >= 0 everywhere, `message`, as well as the first `summarize`\r\n entries of `x` are printed, and `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to\r\n \"assert_non_negative\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` is all non-negative. This can\r\n be used with `tf.control_dependencies` inside of `tf.function`s to block\r\n followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x[i] >= 0` is False. 
The check can be performed immediately during eager\r\n execution or if `x` is statically known.\r\n \"\"\"\r\n return assert_non_negative(x=x, summarize=summarize, message=message,\r\n name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_non_negative', 'assert_non_negative'])\r\[email protected]_endpoints('assert_non_negative')\r\ndef assert_non_negative(x, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x >= 0` holds element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_non_negative(x)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n Non-negative means, for every element `x[i]` of `x`, we have `x[i] >= 0`.\r\n If `x` is empty this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional).\r\n Defaults to \"assert_non_negative\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` is all non-negative.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_non_negative', [x, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n if data is None:\r\n if context.executing_eagerly():\r\n name = _shape_and_dtype_str(x)\r\n else:\r\n name = x.name\r\n data = [\r\n message,\r\n 'Condition x >= 0 did not hold element-wise:',\r\n 'x (%s) = ' % name, x]\r\n zero = ops.convert_to_tensor(0, dtype=x.dtype)\r\n return assert_less_equal(zero, x, data=data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_non_positive', v1=[])\r\ndef assert_non_positive_v2(x, message=None, summarize=None, name=None):\r\n \"\"\"Assert the condition `x <= 0` holds element-wise.\r\n\r\n This Op checks that `x[i] <= 0` holds for every element of `x`. If `x` is\r\n empty, this is trivially satisfied.\r\n\r\n If `x` is not <= 0 everywhere, `message`, as well as the first `summarize`\r\n entries of `x` are printed, and `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to\r\n \"assert_non_positive\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` is all non-positive. This can\r\n be used with `tf.control_dependencies` inside of `tf.function`s to block\r\n followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x[i] <= 0` is False. 
The check can be performed immediately during eager\r\n execution or if `x` is statically known.\r\n \"\"\"\r\n return assert_non_positive(x=x, summarize=summarize, message=message,\r\n name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_non_positive', 'assert_non_positive'])\r\[email protected]_endpoints('assert_non_positive')\r\ndef assert_non_positive(x, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x <= 0` holds element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_non_positive(x)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n Non-positive means, for every element `x[i]` of `x`, we have `x[i] <= 0`.\r\n If `x` is empty this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional).\r\n Defaults to \"assert_non_positive\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` is all non-positive.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_non_positive', [x, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n if data is None:\r\n if context.executing_eagerly():\r\n name = _shape_and_dtype_str(x)\r\n else:\r\n name = x.name\r\n data = [\r\n message,\r\n 'Condition x <= 0 did not hold element-wise:'\r\n 'x (%s) = ' % name, x]\r\n zero = ops.convert_to_tensor(0, dtype=x.dtype)\r\n return assert_less_equal(x, zero, data=data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_equal', 'assert_equal', v1=[])\r\ndef assert_equal_v2(x, y, message=None, summarize=None, name=None):\r\n \"\"\"Assert the condition `x == y` holds element-wise.\r\n\r\n This Op checks that `x[i] == y[i]` holds for every pair of (possibly\r\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\r\n trivially satisfied.\r\n\r\n If `x` and `y` are not equal, `message`, as well as the first `summarize`\r\n entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to \"assert_equal\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x == y` is False. This can be\r\n used with `tf.control_dependencies` inside of `tf.function`s to block\r\n followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x == y` is False. 
The check can be performed immediately during eager\r\n execution or if `x` and `y` are statically known.\r\n \"\"\"\r\n return assert_equal(x=x, y=y, summarize=summarize, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_equal', 'assert_equal'])\r\ndef assert_equal(x, y, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x == y` holds element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_equal(x, y)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n This condition holds if for every pair of (possibly broadcast) elements\r\n `x[i]`, `y[i]`, we have `x[i] == y[i]`.\r\n If both `x` and `y` are empty, this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`, `y`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_equal\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x == y` is False.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x == y` is False. The check can be performed immediately during eager\r\n execution or if `x` and `y` are statically known.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_equal', [x, y, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n y = ops.convert_to_tensor(y, name='y')\r\n\r\n # Short-circuit if x and y are the same tensor.\r\n if x is y:\r\n return None if context.executing_eagerly() else control_flow_ops.no_op()\r\n\r\n if context.executing_eagerly():\r\n eq = math_ops.equal(x, y)\r\n condition = math_ops.reduce_all(eq)\r\n if not condition:\r\n # Prepare a message with first elements of x and y.\r\n summary_msg = ''\r\n # Default to printing 3 elements like control_flow_ops.Assert (used\r\n # by graph mode) does.\r\n summarize = 3 if summarize is None else summarize\r\n if summarize:\r\n # reshape((-1,)) is the fastest way to get a flat array view.\r\n x_np = x.numpy().reshape((-1,))\r\n y_np = y.numpy().reshape((-1,))\r\n x_sum = min(x_np.size, summarize)\r\n y_sum = min(y_np.size, summarize)\r\n summary_msg = ('First %d elements of x:\\n%s\\n'\r\n 'First %d elements of y:\\n%s\\n' %\r\n (x_sum, x_np[:x_sum],\r\n y_sum, y_np[:y_sum]))\r\n\r\n index_and_values_str = ''\r\n if x.shape == y.shape and x.shape.as_list():\r\n # If the shapes of x and y are the same (and not scalars),\r\n # Get the values that actually differed and their indices.\r\n # If shapes are different this information is more confusing\r\n # than useful.\r\n mask = math_ops.logical_not(eq)\r\n indices = array_ops.where(mask)\r\n indices_np = indices.numpy()\r\n x_vals = array_ops.boolean_mask(x, mask)\r\n y_vals = array_ops.boolean_mask(y, mask)\r\n summarize = min(summarize, indices_np.shape[0])\r\n index_and_values_str = (\r\n 'Indices of first %s different values:\\n%s\\n'\r\n 'Corresponding x values:\\n%s\\n'\r\n 'Corresponding y values:\\n%s\\n' %\r\n (summarize, indices_np[:summarize],\r\n x_vals.numpy().reshape((-1,))[:summarize],\r\n y_vals.numpy().reshape((-1,))[:summarize]))\r\n\r\n raise errors.InvalidArgumentError(\r\n node_def=None, op=None,\r\n 
message=('%s\\nCondition x == y did not hold.\\n%s%s' %\r\n (message or '', index_and_values_str, summary_msg)))\r\n return\r\n\r\n if data is None:\r\n data = [\r\n message,\r\n 'Condition x == y did not hold element-wise:',\r\n 'x (%s) = ' % x.name, x,\r\n 'y (%s) = ' % y.name, y\r\n ]\r\n condition = math_ops.reduce_all(math_ops.equal(x, y))\r\n x_static = tensor_util.constant_value(x)\r\n y_static = tensor_util.constant_value(y)\r\n if x_static is not None and y_static is not None:\r\n condition_static = np.all(np.equal(x_static, y_static))\r\n _assert_static(condition_static, data)\r\n return control_flow_ops.Assert(condition, data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_none_equal', v1=[])\r\ndef assert_none_equal_v2(x, y, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x != y` holds for all elements.\r\n\r\n This Op checks that `x[i] != y[i]` holds for every pair of (possibly\r\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\r\n trivially satisfied.\r\n\r\n If any elements of `x` and `y` are equal, `message`, as well as the first\r\n `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError`\r\n is raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to\r\n \"assert_none_equal\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x != y` is ever False. This can\r\n be used with `tf.control_dependencies` inside of `tf.function`s to block\r\n followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x != y` is False for any pair of elements in `x` and `y`. The check can\r\n be performed immediately during eager execution or if `x` and `y` are\r\n statically known.\r\n \"\"\"\r\n return assert_none_equal(x=x, y=y, summarize=summarize, message=message,\r\n name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_none_equal', 'assert_none_equal'])\r\[email protected]_endpoints('assert_none_equal')\r\ndef assert_none_equal(\r\n x, y, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x != y` holds for all elements.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_none_equal(x, y)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n This condition holds if for every pair of (possibly broadcast) elements\r\n `x[i]`, `y[i]`, we have `x[i] != y[i]`.\r\n If both `x` and `y` are empty, this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n data: The tensors to print out if the condition is False. 
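To illustrate the two code paths of `assert_equal` described above, a small sketch (the `checked_sum` helper is hypothetical):

```python
import tensorflow as tf

x = tf.constant([1, 2, 3])
y = tf.constant([1, 2, 3])

# Eager execution: the check runs immediately; a passing check returns None.
tf.debugging.assert_equal(x, y)

# Inside a tf.function the returned Assert op can gate follow-up computation.
@tf.function
def checked_sum(a, b):
    with tf.control_dependencies([tf.debugging.assert_equal(a, b)]):
        return tf.reduce_sum(a)

print(checked_sum(x, y).numpy())  # 6
```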
Defaults to\r\n error message and first few entries of `x`, `y`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional).\r\n Defaults to \"assert_none_equal\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x != y` is ever False.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_none_equal', [x, y, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n y = ops.convert_to_tensor(y, name='y')\r\n if context.executing_eagerly():\r\n x_name = _shape_and_dtype_str(x)\r\n y_name = _shape_and_dtype_str(y)\r\n else:\r\n x_name = x.name\r\n y_name = y.name\r\n\r\n if data is None:\r\n data = [\r\n message,\r\n 'Condition x != y did not hold for every single element:',\r\n 'x (%s) = ' % x_name, x,\r\n 'y (%s) = ' % y_name, y\r\n ]\r\n condition = math_ops.reduce_all(math_ops.not_equal(x, y))\r\n return control_flow_ops.Assert(condition, data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_near', v1=[])\r\ndef assert_near_v2(x, y, rtol=None, atol=None, message=None, summarize=None,\r\n name=None):\r\n \"\"\"Assert the condition `x` and `y` are close element-wise.\r\n\r\n This Op checks that `x[i] - y[i] < atol + rtol * tf.abs(y[i])` holds for every\r\n pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are\r\n empty, this is trivially satisfied.\r\n\r\n If any elements of `x` and `y` are not close, `message`, as well as the first\r\n `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError`\r\n is raised.\r\n\r\n The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest\r\n representable positive number such that `1 + eps != 1`. This is about\r\n `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`.\r\n See `numpy.finfo`.\r\n\r\n Args:\r\n x: Float or complex `Tensor`.\r\n y: Float or complex `Tensor`, same dtype as and broadcastable to `x`.\r\n rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.\r\n The relative tolerance. Default is `10 * eps`.\r\n atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.\r\n The absolute tolerance. Default is `10 * eps`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to \"assert_near\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x` and `y` are not close enough.\r\n This can be used with `tf.control_dependencies` inside of `tf.function`s\r\n to block followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x != y` is False for any pair of elements in `x` and `y`. 
The check can\r\n be performed immediately during eager execution or if `x` and `y` are\r\n statically known.\r\n\r\n @compatibility(numpy)\r\n Similar to `numpy.assert_allclose`, except tolerance depends on data type.\r\n This is due to the fact that `TensorFlow` is often used with `32bit`, `64bit`,\r\n and even `16bit` data.\r\n @end_compatibility\r\n \"\"\"\r\n return assert_near(x=x, y=y, rtol=rtol, atol=atol, summarize=summarize,\r\n message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_near', 'assert_near'])\r\[email protected]_endpoints('assert_near')\r\ndef assert_near(\r\n x, y, rtol=None, atol=None, data=None, summarize=None, message=None,\r\n name=None):\r\n \"\"\"Assert the condition `x` and `y` are close element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_near(x, y)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n This condition holds if for every pair of (possibly broadcast) elements\r\n `x[i]`, `y[i]`, we have\r\n\r\n ```tf.abs(x[i] - y[i]) <= atol + rtol * tf.abs(y[i])```.\r\n\r\n If both `x` and `y` are empty, this is trivially satisfied.\r\n\r\n The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest\r\n representable positive number such that `1 + eps != 1`. This is about\r\n `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`.\r\n See `numpy.finfo`.\r\n\r\n Args:\r\n x: Float or complex `Tensor`.\r\n y: Float or complex `Tensor`, same `dtype` as, and broadcastable to, `x`.\r\n rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.\r\n The relative tolerance. Default is `10 * eps`.\r\n atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.\r\n The absolute tolerance. Default is `10 * eps`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`, `y`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). 
Defaults to \"assert_near\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x` and `y` are not close enough.\r\n\r\n @compatibility(numpy)\r\n Similar to `numpy.assert_allclose`, except tolerance depends on data type.\r\n This is due to the fact that `TensorFlow` is often used with `32bit`, `64bit`,\r\n and even `16bit` data.\r\n @end_compatibility\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_near', [x, y, rtol, atol, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n y = ops.convert_to_tensor(y, name='y', dtype=x.dtype)\r\n\r\n eps = np.finfo(x.dtype.as_numpy_dtype).eps\r\n rtol = 10 * eps if rtol is None else rtol\r\n atol = 10 * eps if atol is None else atol\r\n\r\n rtol = ops.convert_to_tensor(rtol, name='rtol', dtype=x.dtype)\r\n atol = ops.convert_to_tensor(atol, name='atol', dtype=x.dtype)\r\n\r\n if context.executing_eagerly():\r\n x_name = _shape_and_dtype_str(x)\r\n y_name = _shape_and_dtype_str(y)\r\n else:\r\n x_name = x.name\r\n y_name = y.name\r\n\r\n if data is None:\r\n data = [\r\n message,\r\n 'x and y not equal to tolerance rtol = %s, atol = %s' % (rtol, atol),\r\n 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y\r\n ]\r\n tol = atol + rtol * math_ops.abs(y)\r\n diff = math_ops.abs(x - y)\r\n condition = math_ops.reduce_all(math_ops.less(diff, tol))\r\n return control_flow_ops.Assert(condition, data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_less', 'assert_less', v1=[])\r\ndef assert_less_v2(x, y, message=None, summarize=None, name=None):\r\n \"\"\"Assert the condition `x < y` holds element-wise.\r\n\r\n This Op checks that `x[i] < y[i]` holds for every pair of (possibly\r\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\r\n trivially satisfied.\r\n\r\n If `x` is not less than `y` element-wise, `message`, as well as the first\r\n `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is\r\n raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to \"assert_less\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x < y` is False.\r\n This can be used with `tf.control_dependencies` inside of `tf.function`s\r\n to block followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x < y` is False. 
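A short sketch of `assert_near` with explicit tolerances (values chosen so that `|x - y| < atol + rtol * |y|` holds):

```python
import tensorflow as tf

x = tf.constant([1.000001, 2.0], dtype=tf.float32)
y = tf.constant([1.000002, 2.0], dtype=tf.float32)

# Passes for the given tolerances; a violation would raise InvalidArgumentError
# in eager mode (or produce a failing Assert op in graph mode).
tf.debugging.assert_near(x, y, rtol=1e-5, atol=1e-5)
```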
The check can be performed immediately during eager\r\n execution or if `x` and `y` are statically known.\r\n \"\"\"\r\n return assert_less(x=x, y=y, summarize=summarize, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_less', 'assert_less'])\r\ndef assert_less(x, y, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x < y` holds element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_less(x, y)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n This condition holds if for every pair of (possibly broadcast) elements\r\n `x[i]`, `y[i]`, we have `x[i] < y[i]`.\r\n If both `x` and `y` are empty, this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`, `y`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_less\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x < y` is False.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_less', [x, y, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n y = ops.convert_to_tensor(y, name='y')\r\n if context.executing_eagerly():\r\n x_name = _shape_and_dtype_str(x)\r\n y_name = _shape_and_dtype_str(y)\r\n else:\r\n x_name = x.name\r\n y_name = y.name\r\n\r\n if data is None:\r\n data = [\r\n message,\r\n 'Condition x < y did not hold element-wise:',\r\n 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y\r\n ]\r\n condition = math_ops.reduce_all(math_ops.less(x, y))\r\n return control_flow_ops.Assert(condition, data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_less_equal', v1=[])\r\ndef assert_less_equal_v2(x, y, message=None, summarize=None, name=None):\r\n \"\"\"Assert the condition `x <= y` holds element-wise.\r\n\r\n This Op checks that `x[i] <= y[i]` holds for every pair of (possibly\r\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\r\n trivially satisfied.\r\n\r\n If `x` is not less or equal than `y` element-wise, `message`, as well as the\r\n first `summarize` entries of `x` and `y` are printed, and\r\n `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to \"assert_less_equal\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x <= y` is False. This can be\r\n used with `tf.control_dependencies` inside of `tf.function`s to block\r\n followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x <= y` is False. 
The check can be performed immediately during eager\r\n execution or if `x` and `y` are statically known.\r\n \"\"\"\r\n return assert_less_equal(x=x, y=y,\r\n summarize=summarize, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_less_equal', 'assert_less_equal'])\r\[email protected]_endpoints('assert_less_equal')\r\ndef assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x <= y` holds element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_less_equal(x, y)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n This condition holds if for every pair of (possibly broadcast) elements\r\n `x[i]`, `y[i]`, we have `x[i] <= y[i]`.\r\n If both `x` and `y` are empty, this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`, `y`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_less_equal\"\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x <= y` is False.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_less_equal', [x, y, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n y = ops.convert_to_tensor(y, name='y')\r\n if context.executing_eagerly():\r\n x_name = _shape_and_dtype_str(x)\r\n y_name = _shape_and_dtype_str(y)\r\n else:\r\n x_name = x.name\r\n y_name = y.name\r\n\r\n if data is None:\r\n data = [\r\n message,\r\n 'Condition x <= y did not hold element-wise:'\r\n 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y\r\n ]\r\n condition = math_ops.reduce_all(math_ops.less_equal(x, y))\r\n return control_flow_ops.Assert(condition, data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_greater', 'assert_greater', v1=[])\r\ndef assert_greater_v2(x, y, message=None, summarize=None, name=None):\r\n \"\"\"Assert the condition `x > y` holds element-wise.\r\n\r\n This Op checks that `x[i] > y[i]` holds for every pair of (possibly\r\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\r\n trivially satisfied.\r\n\r\n If `x` is not greater than `y` element-wise, `message`, as well as the first\r\n `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is\r\n raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to \"assert_greater\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x > y` is False. This can be\r\n used with `tf.control_dependencies` inside of `tf.function`s to block\r\n followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x > y` is False. 
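A minimal sketch contrasting the strict and non-strict ordering checks above (inputs are illustrative):

```python
import tensorflow as tf

lo = tf.constant([0.0, 1.0, 2.0])
hi = tf.constant([1.0, 1.0, 5.0])

tf.debugging.assert_less_equal(lo, hi)   # passes: element-wise <=
try:
    tf.debugging.assert_less(lo, hi)     # fails: lo[1] < hi[1] does not hold
except tf.errors.InvalidArgumentError:
    print("strict ordering violated")
```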
The check can be performed immediately during eager\r\n execution or if `x` and `y` are statically known.\r\n \"\"\"\r\n return assert_greater(x=x, y=y, summarize=summarize, message=message,\r\n name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_greater', 'assert_greater'])\r\ndef assert_greater(x, y, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert the condition `x > y` holds element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_greater(x, y)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n This condition holds if for every pair of (possibly broadcast) elements\r\n `x[i]`, `y[i]`, we have `x[i] > y[i]`.\r\n If both `x` and `y` are empty, this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`, `y`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_greater\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x > y` is False.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_greater', [x, y, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n y = ops.convert_to_tensor(y, name='y')\r\n if context.executing_eagerly():\r\n x_name = _shape_and_dtype_str(x)\r\n y_name = _shape_and_dtype_str(y)\r\n else:\r\n x_name = x.name\r\n y_name = y.name\r\n\r\n if data is None:\r\n data = [\r\n message,\r\n 'Condition x > y did not hold element-wise:'\r\n 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y\r\n ]\r\n condition = math_ops.reduce_all(math_ops.greater(x, y))\r\n return control_flow_ops.Assert(condition, data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_greater_equal', v1=[])\r\ndef assert_greater_equal_v2(x, y, message=None, summarize=None, name=None):\r\n \"\"\"Assert the condition `x >= y` holds element-wise.\r\n\r\n This Op checks that `x[i] >= y[i]` holds for every pair of (possibly\r\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\r\n trivially satisfied.\r\n\r\n If `x` is not greater or equal to `y` element-wise, `message`, as well as the\r\n first `summarize` entries of `x` and `y` are printed, and\r\n `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n message: A string to prefix to the default message.\r\n summarize: Print this many entries of each tensor.\r\n name: A name for this operation (optional). Defaults to\r\n \"assert_greater_equal\".\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x >= y` is False. This can be\r\n used with `tf.control_dependencies` inside of `tf.function`s to block\r\n followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x >= y` is False. 
The check can be performed immediately during eager\r\n execution or if `x` and `y` are statically known.\r\n \"\"\"\r\n return assert_greater_equal(x=x, y=y, summarize=summarize, message=message,\r\n name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_greater_equal', 'assert_greater_equal'])\r\[email protected]_endpoints('assert_greater_equal')\r\ndef assert_greater_equal(x, y, data=None, summarize=None, message=None,\r\n name=None):\r\n \"\"\"Assert the condition `x >= y` holds element-wise.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_greater_equal(x, y)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n This condition holds if for every pair of (possibly broadcast) elements\r\n `x[i]`, `y[i]`, we have `x[i] >= y[i]`.\r\n If both `x` and `y` are empty, this is trivially satisfied.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and first few entries of `x`, `y`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to\r\n \"assert_greater_equal\"\r\n\r\n Returns:\r\n Op that raises `InvalidArgumentError` if `x >= y` is False.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_greater_equal', [x, y, data]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n y = ops.convert_to_tensor(y, name='y')\r\n if context.executing_eagerly():\r\n x_name = _shape_and_dtype_str(x)\r\n y_name = _shape_and_dtype_str(y)\r\n else:\r\n x_name = x.name\r\n y_name = y.name\r\n\r\n if data is None:\r\n data = [\r\n message,\r\n 'Condition x >= y did not hold element-wise:'\r\n 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y\r\n ]\r\n condition = math_ops.reduce_all(math_ops.greater_equal(x, y))\r\n return control_flow_ops.Assert(condition, data, summarize=summarize)\r\n\r\n\r\ndef _assert_rank_condition(\r\n x, rank, static_condition, dynamic_condition, data, summarize):\r\n \"\"\"Assert `x` has a rank that satisfies a given condition.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n rank: Scalar `Tensor`.\r\n static_condition: A python function that takes `[actual_rank, given_rank]`\r\n and returns `True` if the condition is satisfied, `False` otherwise.\r\n dynamic_condition: An `op` that takes [actual_rank, given_rank]\r\n and return `True` if the condition is satisfied, `False` otherwise.\r\n data: The tensors to print out if the condition is false. 
Defaults to\r\n error message and first few entries of `x`.\r\n summarize: Print this many entries of each tensor.\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` if `x` fails dynamic_condition.\r\n\r\n Raises:\r\n ValueError: If static checks determine `x` fails static_condition.\r\n \"\"\"\r\n assert_type(rank, dtypes.int32)\r\n\r\n # Attempt to statically defined rank.\r\n rank_static = tensor_util.constant_value(rank)\r\n if rank_static is not None:\r\n if rank_static.ndim != 0:\r\n raise ValueError('Rank must be a scalar.')\r\n\r\n x_rank_static = x.get_shape().ndims\r\n if x_rank_static is not None:\r\n if not static_condition(x_rank_static, rank_static):\r\n raise ValueError(\r\n 'Static rank condition failed', x_rank_static, rank_static)\r\n return control_flow_ops.no_op(name='static_checks_determined_all_ok')\r\n\r\n condition = dynamic_condition(array_ops.rank(x), rank)\r\n\r\n # Add the condition that `rank` must have rank zero. Prevents the bug where\r\n # someone does assert_rank(x, [n]), rather than assert_rank(x, n).\r\n if rank_static is None:\r\n this_data = ['Rank must be a scalar. Received rank: ', rank]\r\n rank_check = assert_rank(rank, 0, data=this_data)\r\n condition = control_flow_ops.with_dependencies([rank_check], condition)\r\n\r\n return control_flow_ops.Assert(condition, data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_rank', 'assert_rank', v1=[])\r\ndef assert_rank_v2(x, rank, message=None, name=None):\r\n \"\"\"Assert that `x` has rank equal to `rank`.\r\n\r\n This Op checks that the rank of `x` is equal to `rank`.\r\n\r\n If `x` has a different rank, `message`, as well as the shape of `x` are\r\n printed, and `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: `Tensor`.\r\n rank: Scalar integer `Tensor`.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to\r\n \"assert_rank\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` has specified rank.\r\n If static checks determine `x` has correct rank, a `no_op` is returned.\r\n This can be used with `tf.control_dependencies` inside of `tf.function`s\r\n to block followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: if the check can be performed immediately and\r\n `x` does not have rank `rank`. The check can be performed immediately\r\n during eager execution or if the shape of `x` is statically known.\r\n \"\"\"\r\n return assert_rank(x=x, rank=rank, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_rank', 'assert_rank'])\r\ndef assert_rank(x, rank, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert `x` has rank equal to `rank`.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_rank(x, 2)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n rank: Scalar integer `Tensor`.\r\n data: The tensors to print out if the condition is False. Defaults to\r\n error message and the shape of `x`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). 
Defaults to \"assert_rank\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` has specified rank.\r\n If static checks determine `x` has correct rank, a `no_op` is returned.\r\n\r\n Raises:\r\n ValueError: If static checks determine `x` has wrong rank.\r\n \"\"\"\r\n with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):\r\n x = ops.convert_to_tensor(x, name='x')\r\n rank = ops.convert_to_tensor(rank, name='rank')\r\n message = message or ''\r\n\r\n static_condition = lambda actual_rank, given_rank: actual_rank == given_rank\r\n dynamic_condition = math_ops.equal\r\n\r\n if context.executing_eagerly():\r\n name = ''\r\n else:\r\n name = x.name\r\n\r\n if data is None:\r\n data = [\r\n message,\r\n 'Tensor %s must have rank' % name, rank, 'Received shape: ',\r\n array_ops.shape(x)\r\n ]\r\n\r\n try:\r\n assert_op = _assert_rank_condition(x, rank, static_condition,\r\n dynamic_condition, data, summarize)\r\n\r\n except ValueError as e:\r\n if e.args[0] == 'Static rank condition failed':\r\n raise ValueError(\r\n '%s. Tensor %s must have rank %d. Received rank %d, shape %s' %\r\n (message, name, e.args[2], e.args[1], x.get_shape()))\r\n else:\r\n raise\r\n\r\n return assert_op\r\n\r\n\r\n@tf_export('debugging.assert_rank_at_least', v1=[])\r\ndef assert_rank_at_least_v2(x, rank, message=None, name=None):\r\n \"\"\"Assert that `x` has rank of at least `rank`.\r\n\r\n This Op checks that the rank of `x` is greater or equal to `rank`.\r\n\r\n If `x` has a rank lower than `rank`, `message`, as well as the shape of `x`\r\n are printed, and `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: `Tensor`.\r\n rank: Scalar integer `Tensor`.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to\r\n \"assert_rank_at_least\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` has specified rank or higher.\r\n If static checks determine `x` has correct rank, a `no_op` is returned.\r\n This can be used with `tf.control_dependencies` inside of `tf.function`s\r\n to block followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: `x` does not have rank at least `rank`, but the rank\r\n cannot be statically determined.\r\n ValueError: If static checks determine `x` has mismatched rank.\r\n \"\"\"\r\n return assert_rank_at_least(x=x, rank=rank, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_rank_at_least', 'assert_rank_at_least'])\r\[email protected]_endpoints('assert_rank_at_least')\r\ndef assert_rank_at_least(\r\n x, rank, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert `x` has rank equal to `rank` or higher.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_rank_at_least(x, 2)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n rank: Scalar `Tensor`.\r\n data: The tensors to print out if the condition is False. 
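A brief sketch of the rank assertions: with a statically known shape the check resolves immediately, and a mismatch surfaces as `ValueError` as in the code above (inputs are illustrative):

```python
import tensorflow as tf

x = tf.ones([3, 4])

tf.debugging.assert_rank(x, 2)            # statically satisfied
tf.debugging.assert_rank_at_least(x, 1)   # also statically satisfied

try:
    tf.debugging.assert_rank(x, 3)        # rank is statically known to be 2
except (ValueError, tf.errors.InvalidArgumentError) as e:
    print(type(e).__name__)               # static mismatch -> ValueError
```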
Defaults to\r\n error message and first few entries of `x`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional).\r\n Defaults to \"assert_rank_at_least\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless `x` has specified rank or higher.\r\n If static checks determine `x` has correct rank, a `no_op` is returned.\r\n\r\n Raises:\r\n ValueError: If static checks determine `x` has wrong rank.\r\n \"\"\"\r\n with ops.name_scope(\r\n name, 'assert_rank_at_least', (x, rank) + tuple(data or [])):\r\n x = ops.convert_to_tensor(x, name='x')\r\n rank = ops.convert_to_tensor(rank, name='rank')\r\n message = message or ''\r\n\r\n static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank\r\n dynamic_condition = math_ops.greater_equal\r\n\r\n if context.executing_eagerly():\r\n name = ''\r\n else:\r\n name = x.name\r\n\r\n if data is None:\r\n data = [\r\n message,\r\n 'Tensor %s must have rank at least' % name, rank,\r\n 'Received shape: ', array_ops.shape(x)\r\n ]\r\n\r\n try:\r\n assert_op = _assert_rank_condition(x, rank, static_condition,\r\n dynamic_condition, data, summarize)\r\n\r\n except ValueError as e:\r\n if e.args[0] == 'Static rank condition failed':\r\n raise ValueError(\r\n '%s. Tensor %s must have rank at least %d. Received rank %d, '\r\n 'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))\r\n else:\r\n raise\r\n\r\n return assert_op\r\n\r\n\r\ndef _static_rank_in(actual_rank, given_ranks):\r\n return actual_rank in given_ranks\r\n\r\n\r\ndef _dynamic_rank_in(actual_rank, given_ranks):\r\n if len(given_ranks) < 1:\r\n return ops.convert_to_tensor(False)\r\n result = math_ops.equal(given_ranks[0], actual_rank)\r\n for given_rank in given_ranks[1:]:\r\n result = math_ops.logical_or(\r\n result, math_ops.equal(given_rank, actual_rank))\r\n return result\r\n\r\n\r\ndef _assert_ranks_condition(\r\n x, ranks, static_condition, dynamic_condition, data, summarize):\r\n \"\"\"Assert `x` has a rank that satisfies a given condition.\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n ranks: Scalar `Tensor`.\r\n static_condition: A python function that takes\r\n `[actual_rank, given_ranks]` and returns `True` if the condition is\r\n satisfied, `False` otherwise.\r\n dynamic_condition: An `op` that takes [actual_rank, given_ranks]\r\n and return `True` if the condition is satisfied, `False` otherwise.\r\n data: The tensors to print out if the condition is false. 
Defaults to\r\n error message and first few entries of `x`.\r\n summarize: Print this many entries of each tensor.\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` if `x` fails dynamic_condition.\r\n\r\n Raises:\r\n ValueError: If static checks determine `x` fails static_condition.\r\n \"\"\"\r\n for rank in ranks:\r\n assert_type(rank, dtypes.int32)\r\n\r\n # Attempt to statically defined rank.\r\n ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])\r\n if not any(r is None for r in ranks_static):\r\n for rank_static in ranks_static:\r\n if rank_static.ndim != 0:\r\n raise ValueError('Rank must be a scalar.')\r\n\r\n x_rank_static = x.get_shape().ndims\r\n if x_rank_static is not None:\r\n if not static_condition(x_rank_static, ranks_static):\r\n raise ValueError(\r\n 'Static rank condition failed', x_rank_static, ranks_static)\r\n return control_flow_ops.no_op(name='static_checks_determined_all_ok')\r\n\r\n condition = dynamic_condition(array_ops.rank(x), ranks)\r\n\r\n # Add the condition that `rank` must have rank zero. Prevents the bug where\r\n # someone does assert_rank(x, [n]), rather than assert_rank(x, n).\r\n for rank, rank_static in zip(ranks, ranks_static):\r\n if rank_static is None:\r\n this_data = ['Rank must be a scalar. Received rank: ', rank]\r\n rank_check = assert_rank(rank, 0, data=this_data)\r\n condition = control_flow_ops.with_dependencies([rank_check], condition)\r\n\r\n return control_flow_ops.Assert(condition, data, summarize=summarize)\r\n\r\n\r\n@tf_export('debugging.assert_rank_in', v1=[])\r\ndef assert_rank_in_v2(x, ranks, message=None, name=None):\r\n \"\"\"Assert that `x` has a rank in `ranks`.\r\n\r\n This Op checks that the rank of `x` is in `ranks`.\r\n\r\n If `x` has a different rank, `message`, as well as the shape of `x` are\r\n printed, and `InvalidArgumentError` is raised.\r\n\r\n Args:\r\n x: `Tensor`.\r\n ranks: `Iterable` of scalar `Tensor` objects.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_rank_in\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.\r\n If static checks determine `x` has matching rank, a `no_op` is returned.\r\n This can be used with `tf.control_dependencies` inside of `tf.function`s\r\n to block followup computation until the check has executed.\r\n @compatibility(eager)\r\n returns None\r\n @end_compatibility\r\n\r\n Raises:\r\n InvalidArgumentError: `x` does not have rank in `ranks`, but the rank cannot\r\n be statically determined.\r\n ValueError: If static checks determine `x` has mismatched rank.\r\n \"\"\"\r\n return assert_rank_in(x=x, ranks=ranks, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_rank_in', 'assert_rank_in'])\r\[email protected]_endpoints('assert_rank_in')\r\ndef assert_rank_in(\r\n x, ranks, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert `x` has rank in `ranks`.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_rank_in(x, (2, 4))]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n ranks: Iterable of scalar `Tensor` objects.\r\n data: The tensors to print out if the condition is False. 
Defaults to\r\n error message and first few entries of `x`.\r\n summarize: Print this many entries of each tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional).\r\n Defaults to \"assert_rank_in\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.\r\n If static checks determine `x` has matching rank, a `no_op` is returned.\r\n\r\n Raises:\r\n ValueError: If static checks determine `x` has mismatched rank.\r\n \"\"\"\r\n with ops.name_scope(\r\n name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):\r\n x = ops.convert_to_tensor(x, name='x')\r\n ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])\r\n message = message or ''\r\n\r\n if context.executing_eagerly():\r\n name = ''\r\n else:\r\n name = x.name\r\n\r\n if data is None:\r\n data = [\r\n message, 'Tensor %s must have rank in' % name\r\n ] + list(ranks) + [\r\n 'Received shape: ', array_ops.shape(x)\r\n ]\r\n\r\n try:\r\n assert_op = _assert_ranks_condition(x, ranks, _static_rank_in,\r\n _dynamic_rank_in, data, summarize)\r\n\r\n except ValueError as e:\r\n if e.args[0] == 'Static rank condition failed':\r\n raise ValueError(\r\n '%s. Tensor %s must have rank in %s. Received rank %d, '\r\n 'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))\r\n else:\r\n raise\r\n\r\n return assert_op\r\n\r\n\r\n@tf_export('debugging.assert_integer', v1=[])\r\ndef assert_integer_v2(x, message=None, name=None):\r\n \"\"\"Assert that `x` is of integer dtype.\r\n\r\n If `x` has a non-integer type, `message`, as well as the dtype of `x` are\r\n printed, and `InvalidArgumentError` is raised.\r\n\r\n This can always be checked statically, so this method returns nothing.\r\n\r\n Args:\r\n x: A `Tensor`.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_integer\".\r\n\r\n Raises:\r\n TypeError: If `x.dtype` is not a non-quantized integer type.\r\n \"\"\"\r\n assert_integer(x=x, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_integer', 'assert_integer'])\r\[email protected]_endpoints('assert_integer')\r\ndef assert_integer(x, message=None, name=None):\r\n \"\"\"Assert that `x` is of integer dtype.\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.compat.v1.assert_integer(x)]):\r\n output = tf.reduce_sum(x)\r\n ```\r\n\r\n Args:\r\n x: `Tensor` whose basetype is integer and is not quantized.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_integer\".\r\n\r\n Raises:\r\n TypeError: If `x.dtype` is anything other than non-quantized integer.\r\n\r\n Returns:\r\n A `no_op` that does nothing. Type can be determined statically.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_integer', [x]):\r\n x = ops.convert_to_tensor(x, name='x')\r\n if not x.dtype.is_integer:\r\n if context.executing_eagerly():\r\n name = 'tensor'\r\n else:\r\n name = x.name\r\n err_msg = (\r\n '%s Expected \"x\" to be integer type. 
Found: %s of dtype %s'\r\n % (message, name, x.dtype))\r\n raise TypeError(err_msg)\r\n\r\n return control_flow_ops.no_op('statically_determined_was_integer')\r\n\r\n\r\n@tf_export('debugging.assert_type', v1=[])\r\ndef assert_type_v2(tensor, tf_type, message=None, name=None):\r\n \"\"\"Asserts that the given `Tensor` is of the specified type.\r\n\r\n This can always be checked statically, so this method returns nothing.\r\n\r\n Args:\r\n tensor: A `Tensor`.\r\n tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,\r\n etc).\r\n message: A string to prefix to the default message.\r\n name: A name for this operation. Defaults to \"assert_type\"\r\n\r\n Raises:\r\n TypeError: If the tensor's data type doesn't match `tf_type`.\r\n \"\"\"\r\n assert_type(tensor=tensor, tf_type=tf_type, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_type', 'assert_type'])\r\[email protected]_endpoints('assert_type')\r\ndef assert_type(tensor, tf_type, message=None, name=None):\r\n \"\"\"Statically asserts that the given `Tensor` is of the specified type.\r\n\r\n Args:\r\n tensor: A `Tensor`.\r\n tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,\r\n etc).\r\n message: A string to prefix to the default message.\r\n name: A name to give this `Op`. Defaults to \"assert_type\"\r\n\r\n Raises:\r\n TypeError: If the tensors data type doesn't match `tf_type`.\r\n\r\n Returns:\r\n A `no_op` that does nothing. Type can be determined statically.\r\n \"\"\"\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_type', [tensor]):\r\n tensor = ops.convert_to_tensor(tensor, name='tensor')\r\n if tensor.dtype != tf_type:\r\n if context.executing_eagerly():\r\n raise TypeError('%s tensor must be of type %s' % (message, tf_type))\r\n else:\r\n raise TypeError('%s %s must be of type %s' % (message, tensor.name,\r\n tf_type))\r\n\r\n return control_flow_ops.no_op('statically_determined_correct_type')\r\n\r\n\r\ndef _dimension_sizes(x):\r\n \"\"\"Gets the dimension sizes of a tensor `x`.\r\n\r\n If a size can be determined statically it is returned as an integer,\r\n otherwise as a tensor.\r\n\r\n If `x` is a scalar it is treated as rank 1 size 1.\r\n\r\n Args:\r\n x: A `Tensor`.\r\n\r\n Returns:\r\n Dimension sizes.\r\n \"\"\"\r\n dynamic_shape = array_ops.shape(x)\r\n rank = x.get_shape().rank\r\n rank_is_known = rank is not None\r\n if rank_is_known and rank == 0:\r\n return tuple([1])\r\n if rank_is_known and rank > 0:\r\n static_shape = x.get_shape().as_list()\r\n sizes = [\r\n int(size) if size is not None else dynamic_shape[i]\r\n for i, size in enumerate(static_shape)\r\n ]\r\n return sizes\r\n has_rank_zero = math_ops.equal(array_ops.rank(x), 0)\r\n return control_flow_ops.cond(\r\n has_rank_zero, lambda: array_ops.constant([1]), lambda: dynamic_shape)\r\n\r\n\r\ndef _symbolic_dimension_sizes(symbolic_shape):\r\n # If len(symbolic_shape) == 0 construct a tuple\r\n if not symbolic_shape:\r\n return tuple([1])\r\n\r\n return symbolic_shape\r\n\r\n\r\ndef _has_known_value(dimension_size):\r\n not_none = dimension_size is not None\r\n try:\r\n int(dimension_size)\r\n can_be_parsed_as_int = True\r\n except (ValueError, TypeError):\r\n can_be_parsed_as_int = False\r\n return not_none and can_be_parsed_as_int\r\n\r\n\r\ndef _is_symbol_for_any_size(symbol):\r\n return symbol in [None, '.']\r\n\r\n\r\n_TensorDimSizes = collections.namedtuple(\r\n '_TensorDimSizes',\r\n ['x', 'unspecified_dim', 'actual_sizes', 
'symbolic_sizes'])\r\n\r\n\r\n@tf_export('debugging.assert_shapes', v1=[])\r\ndef assert_shapes_v2(shapes, data=None, summarize=None, message=None,\r\n name=None):\r\n \"\"\"Assert tensor shapes and dimension size relationships between tensors.\r\n\r\n This Op checks that a collection of tensors shape relationships\r\n satisfies given constraints.\r\n\r\n Example:\r\n\r\n ```python\r\n tf.assert_shapes([\r\n (x: ('N', 'Q')),\r\n (y: ('N', 'D')),\r\n (param: ('Q',)),\r\n (scalar: ()),\r\n ])\r\n ```\r\n\r\n If `x`, `y`, `param` or `scalar` does not have a shape that satisfies\r\n all specified constraints, `message`, as well as the first `summarize` entries\r\n of the first encountered violating tensor are printed, and\r\n `InvalidArgumentError` is raised.\r\n\r\n Size entries in the specified shapes are checked against other entries by\r\n their __hash__, except:\r\n - a size entry is interpreted as an explicit size if it can be parsed as an\r\n integer primitive.\r\n - a size entry is interpreted as *any* size if it is None or '.'.\r\n\r\n If the first entry of a shape is `...` (type `Ellipsis`) or '*' that indicates\r\n a variable number of outer dimensions of unspecified size, i.e. the constraint\r\n applies to the inner-most dimensions only.\r\n\r\n Scalar tensors and specified shapes of length zero (excluding the 'inner-most'\r\n prefix) are both treated as having a single dimension of size one.\r\n\r\n Args:\r\n shapes: dictionary with (`Tensor` to shape) items. A shape must be an\r\n iterable.\r\n data: The tensors to print out if the condition is False. Defaults to error\r\n message and first few entries of the violating tensor.\r\n summarize: Print this many entries of the tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_shapes\".\r\n\r\n Raises:\r\n ValueError: If static checks determine any shape constraint is violated.\r\n \"\"\"\r\n assert_shapes(\r\n shapes, data=data, summarize=summarize, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_shapes'])\r\ndef assert_shapes(shapes, data=None, summarize=None, message=None, name=None):\r\n \"\"\"Assert tensor shapes and dimension size relationships between tensors.\r\n\r\n This Op checks that a collection of tensors shape relationships\r\n satisfies given constraints.\r\n\r\n Example:\r\n\r\n ```python\r\n tf.assert_shapes({\r\n (x, ('N', 'Q')),\r\n (y, ('N', 'D')),\r\n (param, ('Q',)),\r\n (scalar, ())\r\n })\r\n ```\r\n\r\n Example of adding a dependency to an operation:\r\n\r\n ```python\r\n with tf.control_dependencies([tf.assert_shapes(shapes)]):\r\n output = tf.matmul(x, y, transpose_a=True)\r\n ```\r\n\r\n If `x`, `y`, `param` or `scalar` does not have a shape that satisfies\r\n all specified constraints, `message`, as well as the first `summarize` entries\r\n of the first encountered violating tensor are printed, and\r\n `InvalidArgumentError` is raised.\r\n\r\n Size entries in the specified shapes are checked against other entries by\r\n their __hash__, except:\r\n - a size entry is interpreted as an explicit size if it can be parsed as an\r\n integer primitive.\r\n - a size entry is interpreted as *any* size if it is None or '.'.\r\n\r\n If the first entry of a shape is `...` (type `Ellipsis`) or '*' that indicates\r\n a variable number of outer dimensions of unspecified size, i.e. 
the constraint\r\n applies to the inner-most dimensions only.\r\n\r\n Scalar tensors and specified shapes of length zero (excluding the 'inner-most'\r\n prefix) are both treated as having a single dimension of size one.\r\n\r\n Args:\r\n shapes: dictionary with (`Tensor` to shape) items. A shape must be an\r\n iterable.\r\n data: The tensors to print out if the condition is False. Defaults to error\r\n message and first few entries of the violating tensor.\r\n summarize: Print this many entries of the tensor.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation (optional). Defaults to \"assert_shapes\".\r\n\r\n Returns:\r\n Op raising `InvalidArgumentError` unless all shape constraints are\r\n satisfied.\r\n If static checks determine all constraints are satisfied, a `no_op` is\r\n returned.\r\n\r\n Raises:\r\n ValueError: If static checks determine any shape constraint is violated.\r\n \"\"\"\r\n # If the user manages to assemble a dict containing tensors (possible in\r\n # Graph mode only), make sure we still accept that.\r\n if isinstance(shapes, dict):\r\n shapes = shapes.items()\r\n\r\n message = message or ''\r\n with ops.name_scope(name, 'assert_shapes', [shapes, data]):\r\n # Shape specified as None implies no constraint\r\n shape_constraints = [\r\n (ops.convert_to_tensor(x), s) for x, s in shapes if s is not None\r\n ]\r\n\r\n executing_eagerly = context.executing_eagerly()\r\n\r\n def tensor_name(x):\r\n if executing_eagerly:\r\n return _shape_and_dtype_str(x)\r\n return x.name\r\n\r\n tensor_dim_sizes = []\r\n for tensor, symbolic_shape in shape_constraints:\r\n is_iterable = (\r\n hasattr(symbolic_shape, '__iter__') or\r\n hasattr(symbolic_shape, '__getitem__') # For Python 2 compat.\r\n )\r\n if not is_iterable:\r\n raise ValueError(\r\n '%s. '\r\n 'Tensor %s. Specified shape must be an iterable. '\r\n 'An iterable has the attribute `__iter__` or `__getitem__`. '\r\n 'Received specified shape: %s' %\r\n (message, tensor_name(tensor), symbolic_shape))\r\n\r\n # We convert this into a tuple to handle strings, lists and numpy arrays\r\n symbolic_shape_tuple = tuple(symbolic_shape)\r\n\r\n tensors_specified_innermost = False\r\n for i, symbol in enumerate(symbolic_shape_tuple):\r\n if symbol not in [Ellipsis, '*']:\r\n continue\r\n\r\n if i != 0:\r\n raise ValueError(\r\n '%s. '\r\n 'Tensor %s specified shape index %d. '\r\n 'Symbol `...` or `*` for a variable number of '\r\n 'unspecified dimensions is only allowed as the first entry' %\r\n (message, tensor_name(tensor), i))\r\n\r\n tensors_specified_innermost = True\r\n\r\n # Only include the size of the specified dimensions since the 0th symbol\r\n # is either ellipsis or *\r\n tensor_dim_sizes.append(\r\n _TensorDimSizes(\r\n tensor, tensors_specified_innermost, _dimension_sizes(tensor),\r\n _symbolic_dimension_sizes(\r\n symbolic_shape_tuple[1:]\r\n if tensors_specified_innermost else symbolic_shape_tuple)))\r\n\r\n rank_assertions = []\r\n for sizes in tensor_dim_sizes:\r\n rank = len(sizes.symbolic_sizes)\r\n rank_zero_or_one = rank in [0, 1]\r\n if sizes.unspecified_dim:\r\n if rank_zero_or_one:\r\n # No assertion of rank needed as `x` only need to have rank at least\r\n # 0. See elif rank_zero_or_one case comment.\r\n continue\r\n assertion = assert_rank_at_least(\r\n x=sizes.x,\r\n rank=rank,\r\n data=data,\r\n summarize=summarize,\r\n message=message,\r\n name=name)\r\n elif rank_zero_or_one:\r\n # Rank 0 is treated as rank 1 size 1, i.e. 
there is\r\n # no distinction between the two in terms of rank.\r\n # See _dimension_sizes.\r\n assertion = assert_rank_in(\r\n x=sizes.x,\r\n ranks=[0, 1],\r\n data=data,\r\n summarize=summarize,\r\n message=message,\r\n name=name)\r\n else:\r\n assertion = assert_rank(\r\n x=sizes.x,\r\n rank=rank,\r\n data=data,\r\n summarize=summarize,\r\n message=message,\r\n name=name)\r\n rank_assertions.append(assertion)\r\n\r\n size_assertions = []\r\n size_specifications = {}\r\n for sizes in tensor_dim_sizes:\r\n for i, size_symbol in enumerate(sizes.symbolic_sizes):\r\n\r\n if _is_symbol_for_any_size(size_symbol):\r\n # Size specified as any implies no constraint\r\n continue\r\n\r\n if sizes.unspecified_dim:\r\n tensor_dim = i - len(sizes.symbolic_sizes)\r\n else:\r\n tensor_dim = i\r\n\r\n if size_symbol in size_specifications or _has_known_value(size_symbol):\r\n if _has_known_value(size_symbol):\r\n specified_size = int(size_symbol)\r\n size_check_message = 'Specified explicitly'\r\n else:\r\n specified_size, specified_by_y, specified_at_dim = \\\r\n size_specifications[size_symbol]\r\n size_check_message = (\r\n 'Specified by tensor %s dimension %d' %\r\n (tensor_name(specified_by_y), specified_at_dim))\r\n\r\n actual_size = sizes.actual_sizes[tensor_dim]\r\n if _has_known_value(actual_size) and _has_known_value(specified_size):\r\n if int(actual_size) != int(specified_size):\r\n raise ValueError(\r\n '%s. %s. Tensor %s dimension %s must have size %d. '\r\n 'Received size %d, shape %s' %\r\n (message, size_check_message, tensor_name(sizes.x),\r\n tensor_dim, specified_size, actual_size,\r\n sizes.x.get_shape()))\r\n # No dynamic assertion needed\r\n continue\r\n\r\n condition = math_ops.equal(\r\n ops.convert_to_tensor(actual_size),\r\n ops.convert_to_tensor(specified_size))\r\n data_ = data\r\n if data is None:\r\n data_ = [\r\n message, size_check_message,\r\n 'Tensor %s dimension' % tensor_name(sizes.x), tensor_dim,\r\n 'must have size', specified_size, 'Received shape: ',\r\n array_ops.shape(sizes.x)\r\n ]\r\n size_assertions.append(\r\n control_flow_ops.Assert(condition, data_, summarize=summarize))\r\n else:\r\n size = sizes.actual_sizes[tensor_dim]\r\n size_specifications[size_symbol] = (size, sizes.x, tensor_dim)\r\n\r\n with ops.control_dependencies(rank_assertions):\r\n shapes_assertion = control_flow_ops.group(size_assertions)\r\n return shapes_assertion\r\n\r\n\r\n# pylint: disable=line-too-long\r\ndef _get_diff_for_monotonic_comparison(x):\r\n \"\"\"Gets the difference x[1:] - x[:-1].\"\"\"\r\n x = array_ops.reshape(x, [-1])\r\n if not is_numeric_tensor(x):\r\n raise TypeError('Expected x to be numeric, instead found: %s' % x)\r\n\r\n # If x has less than 2 elements, there is nothing to compare. 
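A runnable sketch of `assert_shapes` using the list-of-(tensor, shape) form accepted by the implementation above (shapes are illustrative; shared symbols must agree across tensors):

```python
import tensorflow as tf

x = tf.ones([3, 5])
y = tf.ones([3, 7])
param = tf.ones([5])
scalar = tf.constant(2.0)

# 'N' and 'Q' are symbolic sizes shared across entries; 'D' is unconstrained here.
tf.debugging.assert_shapes([
    (x, ('N', 'Q')),
    (y, ('N', 'D')),
    (param, ('Q',)),
    (scalar, ()),
])
```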
So return [].\r\n is_shorter_than_two = math_ops.less(array_ops.size(x), 2)\r\n short_result = lambda: ops.convert_to_tensor([], dtype=x.dtype)\r\n\r\n # With 2 or more elements, return x[1:] - x[:-1]\r\n s_len = array_ops.shape(x) - 1\r\n diff = lambda: array_ops.strided_slice(x, [1], [1] + s_len)- array_ops.strided_slice(x, [0], s_len)\r\n return control_flow_ops.cond(is_shorter_than_two, short_result, diff)\r\n\r\n\r\n@tf_export(\r\n 'debugging.is_numeric_tensor',\r\n v1=['debugging.is_numeric_tensor', 'is_numeric_tensor'])\r\[email protected]_endpoints('is_numeric_tensor')\r\ndef is_numeric_tensor(tensor):\r\n \"\"\"Returns `True` if the elements of `tensor` are numbers.\r\n\r\n Specifically, returns `True` if the dtype of `tensor` is one of the following:\r\n\r\n * `tf.float32`\r\n * `tf.float64`\r\n * `tf.int8`\r\n * `tf.int16`\r\n * `tf.int32`\r\n * `tf.int64`\r\n * `tf.uint8`\r\n * `tf.qint8`\r\n * `tf.qint32`\r\n * `tf.quint8`\r\n * `tf.complex64`\r\n\r\n Returns `False` if `tensor` is of a non-numeric type or if `tensor` is not\r\n a `tf.Tensor` object.\r\n \"\"\"\r\n return isinstance(tensor, ops.Tensor) and tensor.dtype in NUMERIC_TYPES\r\n\r\n\r\n@tf_export(\r\n 'math.is_non_decreasing',\r\n v1=[\r\n 'math.is_non_decreasing', 'debugging.is_non_decreasing',\r\n 'is_non_decreasing'\r\n ])\r\[email protected]_endpoints('debugging.is_non_decreasing',\r\n 'is_non_decreasing')\r\ndef is_non_decreasing(x, name=None):\r\n \"\"\"Returns `True` if `x` is non-decreasing.\r\n\r\n Elements of `x` are compared in row-major order. The tensor `[x[0],...]`\r\n is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.\r\n If `x` has less than two elements, it is trivially non-decreasing.\r\n\r\n See also: `is_strictly_increasing`\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n name: A name for this operation (optional). Defaults to \"is_non_decreasing\"\r\n\r\n Returns:\r\n Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.\r\n\r\n Raises:\r\n TypeError: if `x` is not a numeric tensor.\r\n \"\"\"\r\n with ops.name_scope(name, 'is_non_decreasing', [x]):\r\n diff = _get_diff_for_monotonic_comparison(x)\r\n # When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True.\r\n zero = ops.convert_to_tensor(0, dtype=diff.dtype)\r\n return math_ops.reduce_all(math_ops.less_equal(zero, diff))\r\n\r\n\r\n@tf_export(\r\n 'math.is_strictly_increasing',\r\n v1=[\r\n 'math.is_strictly_increasing', 'debugging.is_strictly_increasing',\r\n 'is_strictly_increasing'\r\n ])\r\[email protected]_endpoints('debugging.is_strictly_increasing',\r\n 'is_strictly_increasing')\r\ndef is_strictly_increasing(x, name=None):\r\n \"\"\"Returns `True` if `x` is strictly increasing.\r\n\r\n Elements of `x` are compared in row-major order. 
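The monotonicity helpers can be exercised directly in eager mode, for example:

```python
import tensorflow as tf

print(tf.math.is_non_decreasing([1, 2, 2, 3]).numpy())       # True  (ties allowed)
print(tf.math.is_strictly_increasing([1, 2, 2, 3]).numpy())  # False (2 repeats)
```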
The tensor `[x[0],...]`\r\n is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.\r\n If `x` has less than two elements, it is trivially strictly increasing.\r\n\r\n See also: `is_non_decreasing`\r\n\r\n Args:\r\n x: Numeric `Tensor`.\r\n name: A name for this operation (optional).\r\n Defaults to \"is_strictly_increasing\"\r\n\r\n Returns:\r\n Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.\r\n\r\n Raises:\r\n TypeError: if `x` is not a numeric tensor.\r\n \"\"\"\r\n with ops.name_scope(name, 'is_strictly_increasing', [x]):\r\n diff = _get_diff_for_monotonic_comparison(x)\r\n # When len(x) = 1, diff = [], less = [], and reduce_all([]) = True.\r\n zero = ops.convert_to_tensor(0, dtype=diff.dtype)\r\n return math_ops.reduce_all(math_ops.less(zero, diff))\r\n\r\n\r\ndef _assert_same_base_type(items, expected_type=None):\r\n r\"\"\"Asserts all items are of the same base type.\r\n\r\n Args:\r\n items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,\r\n `Operation`, or `IndexedSlices`). Can include `None` elements, which\r\n will be ignored.\r\n expected_type: Expected type. If not specified, assert all items are\r\n of the same base type.\r\n\r\n Returns:\r\n Validated type, or none if neither expected_type nor items provided.\r\n\r\n Raises:\r\n ValueError: If any types do not match.\r\n \"\"\"\r\n original_expected_type = expected_type\r\n mismatch = False\r\n for item in items:\r\n if item is not None:\r\n item_type = item.dtype.base_dtype\r\n if not expected_type:\r\n expected_type = item_type\r\n elif expected_type != item_type:\r\n mismatch = True\r\n break\r\n if mismatch:\r\n # Loop back through and build up an informative error message (this is very\r\n # slow, so we don't do it unless we found an error above).\r\n expected_type = original_expected_type\r\n original_item_str = None\r\n for item in items:\r\n if item is not None:\r\n item_type = item.dtype.base_dtype\r\n if not expected_type:\r\n expected_type = item_type\r\n original_item_str = item.name if hasattr(item, 'name') else str(item)\r\n elif expected_type != item_type:\r\n raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (\r\n item.name if hasattr(item, 'name') else str(item),\r\n item_type, expected_type,\r\n (' as %s' % original_item_str) if original_item_str else ''))\r\n return expected_type # Should be unreachable\r\n else:\r\n return expected_type\r\n\r\n\r\n@tf_export(\r\n 'debugging.assert_same_float_dtype',\r\n v1=['debugging.assert_same_float_dtype', 'assert_same_float_dtype'])\r\[email protected]_endpoints('assert_same_float_dtype')\r\ndef assert_same_float_dtype(tensors=None, dtype=None):\r\n \"\"\"Validate and return float type based on `tensors` and `dtype`.\r\n\r\n For ops such as matrix multiplication, inputs and weights must be of the\r\n same float type. This function validates that all `tensors` are the same type,\r\n validates that type is `dtype` (if supplied), and returns the type. Type must\r\n be a floating point type. If neither `tensors` nor `dtype` is supplied,\r\n the function will return `dtypes.float32`.\r\n\r\n Args:\r\n tensors: Tensors of input values. 
Can include `None` elements, which will be\r\n ignored.\r\n dtype: Expected type.\r\n\r\n Returns:\r\n Validated type.\r\n\r\n Raises:\r\n ValueError: if neither `tensors` nor `dtype` is supplied, or result is not\r\n float, or the common type of the inputs is not a floating point type.\r\n \"\"\"\r\n if tensors:\r\n dtype = _assert_same_base_type(tensors, dtype)\r\n if not dtype:\r\n dtype = dtypes.float32\r\n elif not dtype.is_floating:\r\n raise ValueError('Expected floating point type, got %s.' % dtype)\r\n return dtype\r\n\r\n\r\n@tf_export('debugging.assert_scalar', v1=[])\r\ndef assert_scalar_v2(tensor, message=None, name=None):\r\n \"\"\"Asserts that the given `tensor` is a scalar.\r\n\r\n This function raises `ValueError` unless it can be certain that the given\r\n `tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is\r\n unknown.\r\n\r\n This is always checked statically, so this method returns nothing.\r\n\r\n Args:\r\n tensor: A `Tensor`.\r\n message: A string to prefix to the default message.\r\n name: A name for this operation. Defaults to \"assert_scalar\"\r\n\r\n Raises:\r\n ValueError: If the tensor is not scalar (rank 0), or if its shape is\r\n unknown.\r\n \"\"\"\r\n assert_scalar(tensor=tensor, message=message, name=name)\r\n\r\n\r\n@tf_export(v1=['debugging.assert_scalar', 'assert_scalar'])\r\[email protected]_endpoints('assert_scalar')\r\ndef assert_scalar(tensor, name=None, message=None):\r\n \"\"\"Asserts that the given `tensor` is a scalar (i.e. zero-dimensional).\r\n\r\n This function raises `ValueError` unless it can be certain that the given\r\n `tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is\r\n unknown.\r\n\r\n Args:\r\n tensor: A `Tensor`.\r\n name: A name for this operation. 
Defaults to \"assert_scalar\"\r\n message: A string to prefix to the default message.\r\n\r\n Returns:\r\n The input tensor (potentially converted to a `Tensor`).\r\n\r\n Raises:\r\n ValueError: If the tensor is not scalar (rank 0), or if its shape is\r\n unknown.\r\n \"\"\"\r\n with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:\r\n tensor = ops.convert_to_tensor(tensor, name=name_scope)\r\n shape = tensor.get_shape()\r\n if shape.ndims != 0:\r\n if context.executing_eagerly():\r\n raise ValueError('%sExpected scalar shape, saw shape: %s.'\r\n % (message or '', shape,))\r\n else:\r\n raise ValueError('%sExpected scalar shape for %s, saw shape: %s.'\r\n % (message or '', tensor.name, shape))\r\n return tensor\r\n\r\n\r\n@tf_export('ensure_shape')\r\ndef ensure_shape(x, shape, name=None):\r\n \"\"\"Updates the shape of a tensor and checks at runtime that the shape holds.\r\n\r\n For example:\r\n ```python\r\n x = tf.compat.v1.placeholder(tf.int32)\r\n print(x.shape)\r\n ==> TensorShape(None)\r\n y = x * 2\r\n print(y.shape)\r\n ==> TensorShape(None)\r\n\r\n y = tf.ensure_shape(y, (None, 3, 3))\r\n print(y.shape)\r\n ==> TensorShape([Dimension(None), Dimension(3), Dimension(3)])\r\n\r\n with tf.compat.v1.Session() as sess:\r\n # Raises tf.errors.InvalidArgumentError, because the shape (3,) is not\r\n # compatible with the shape (None, 3, 3)\r\n sess.run(y, feed_dict={x: [1, 2, 3]})\r\n\r\n ```\r\n\r\n NOTE: This differs from `Tensor.set_shape` in that it sets the static shape\r\n of the resulting tensor and enforces it at runtime, raising an error if the\r\n tensor's runtime shape is incompatible with the specified shape.\r\n `Tensor.set_shape` sets the static shape of the tensor without enforcing it\r\n at runtime, which may result in inconsistencies between the statically-known\r\n shape of tensors and the runtime value of tensors.\r\n\r\n Args:\r\n x: A `Tensor`.\r\n shape: A `TensorShape` representing the shape of this tensor, a\r\n `TensorShapeProto`, a list, a tuple, or None.\r\n name: A name for this operation (optional). Defaults to \"EnsureShape\".\r\n\r\n Returns:\r\n A `Tensor`. Has the same type and contents as `x`. At runtime, raises a\r\n `tf.errors.InvalidArgumentError` if `shape` is incompatible with the shape\r\n of `x`.\r\n \"\"\"\r\n if not isinstance(shape, tensor_shape.TensorShape):\r\n shape = tensor_shape.TensorShape(shape)\r\n\r\n return array_ops.ensure_shape(x, shape, name=name)\r\n\r\n\r\[email protected]('EnsureShape')\r\ndef _ensure_shape_grad(op, grad):\r\n del op # Unused.\r\n return grad\r\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for tensorflow.python.framework.composite_tensor_utils.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.python.framework import composite_tensor_utils\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import sparse_tensor\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops.ragged import ragged_tensor\r\nfrom tensorflow.python.ops.ragged import ragged_tensor_value\r\nfrom tensorflow.python.platform import googletest\r\n\r\n\r\nclass CompositeTensorTest(test_util.TensorFlowTestCase):\r\n\r\n def test_is_composite(self):\r\n # Validate that all composite tensor and value types return true.\r\n self.assertTrue(\r\n composite_tensor_utils.is_composite_or_composite_value(\r\n sparse_tensor.SparseTensor([[0, 0]], [1], [1, 1])))\r\n self.assertTrue(\r\n composite_tensor_utils.is_composite_or_composite_value(\r\n sparse_tensor.SparseTensorValue([[0, 0]], [1], [1, 1])))\r\n self.assertTrue(\r\n composite_tensor_utils.is_composite_or_composite_value(\r\n ragged_tensor.RaggedTensor.from_row_splits(\r\n np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))))\r\n self.assertTrue(\r\n composite_tensor_utils.is_composite_or_composite_value(\r\n ragged_tensor_value.RaggedTensorValue(\r\n np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))))\r\n\r\n # Test that numpy arrays and tensors return false.\r\n self.assertFalse(\r\n composite_tensor_utils.is_composite_or_composite_value(\r\n np.ndarray([0, 1])))\r\n self.assertFalse(\r\n composite_tensor_utils.is_composite_or_composite_value(\r\n ops.convert_to_tensor([3, 1])))\r\n\r\n def test_sparse_concatenation(self):\r\n tensor_1 = sparse_tensor.SparseTensor([[0, 0]], [1], [1, 1])\r\n tensor_2 = sparse_tensor.SparseTensor([[0, 0]], [2], [1, 1])\r\n concatenated_tensor = composite_tensor_utils.append_composite_tensor(\r\n tensor_1, tensor_2)\r\n evaluated_tensor = self.evaluate(concatenated_tensor)\r\n self.assertAllEqual(evaluated_tensor.indices, [[0, 0], [1, 0]])\r\n self.assertAllEqual(evaluated_tensor.values, [1, 2])\r\n self.assertAllEqual(evaluated_tensor.dense_shape, [2, 1])\r\n\r\n def test_sparse_value_concatenation(self):\r\n tensor_1 = sparse_tensor.SparseTensorValue([[0, 0]], [1], [1, 1])\r\n tensor_2 = sparse_tensor.SparseTensorValue([[0, 0]], [2], [1, 1])\r\n concatenated_tensor = composite_tensor_utils.append_composite_tensor(\r\n tensor_1, tensor_2)\r\n self.assertAllEqual(concatenated_tensor.indices, [[0, 0], [1, 0]])\r\n self.assertAllEqual(concatenated_tensor.values, [1, 2])\r\n self.assertAllEqual(concatenated_tensor.dense_shape, [2, 1])\r\n\r\n def test_ragged_concatenation(self):\r\n tensor_1 = ragged_tensor.RaggedTensor.from_row_splits(\r\n 
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))\r\n tensor_2 = ragged_tensor.RaggedTensor.from_row_splits(\r\n np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64))\r\n concatenated_tensor = composite_tensor_utils.append_composite_tensor(\r\n tensor_1, tensor_2)\r\n evaluated_tensor = self.evaluate(concatenated_tensor)\r\n\r\n self.assertAllEqual(evaluated_tensor.values, [0, 1, 2, 3, 4, 5])\r\n self.assertAllEqual(evaluated_tensor.row_splits, [0, 1, 3, 5, 6])\r\n\r\n def test_ragged_value_concatenation(self):\r\n tensor_1 = ragged_tensor_value.RaggedTensorValue(\r\n np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64))\r\n tensor_2 = ragged_tensor_value.RaggedTensorValue(\r\n np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64))\r\n concatenated_tensor = composite_tensor_utils.append_composite_tensor(\r\n tensor_1, tensor_2)\r\n\r\n self.assertAllEqual(concatenated_tensor.values, [0, 1, 2, 3, 4, 5])\r\n self.assertAllEqual(concatenated_tensor.row_splits, [0, 1, 3, 5, 6])\r\n\r\n\r\nif __name__ == '__main__':\r\n googletest.main()\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Utils for Estimator (deprecated).\r\n\r\nThis module and all its submodules are deprecated. See\r\n[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)\r\nfor migration instructions.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.util import tf_inspect\r\n\r\n\r\ndef assert_estimator_contract(tester, estimator_class):\r\n \"\"\"Asserts whether given estimator satisfies the expected contract.\r\n\r\n This doesn't check every details of contract. This test is used for that a\r\n function is not forgotten to implement in a precanned Estimator.\r\n\r\n Args:\r\n tester: A tf.test.TestCase.\r\n estimator_class: 'type' object of pre-canned estimator.\r\n \"\"\"\r\n attributes = tf_inspect.getmembers(estimator_class)\r\n attribute_names = [a[0] for a in attributes]\r\n\r\n tester.assertTrue('config' in attribute_names)\r\n tester.assertTrue('evaluate' in attribute_names)\r\n tester.assertTrue('export' in attribute_names)\r\n tester.assertTrue('fit' in attribute_names)\r\n tester.assertTrue('get_variable_names' in attribute_names)\r\n tester.assertTrue('get_variable_value' in attribute_names)\r\n tester.assertTrue('model_dir' in attribute_names)\r\n tester.assertTrue('predict' in attribute_names)\r\n\r\n\r\ndef assert_in_range(min_value, max_value, key, metrics):\r\n actual_value = metrics[key]\r\n if actual_value < min_value:\r\n raise ValueError('%s: %s < %s.' % (key, actual_value, min_value))\r\n if actual_value > max_value:\r\n raise ValueError('%s: %s > %s.' % (key, actual_value, max_value))\r\n", "# Copyright 2018 The TensorFlow Authors. 
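As a quick illustration of the deprecated estimator test helpers above, here is a hedged sketch of how assert_in_range behaves, assuming that helper is in scope; the metrics dict and its values are hypothetical:

metrics = {'accuracy': 0.87}
assert_in_range(0.5, 1.0, 'accuracy', metrics)    # within range: returns silently
# assert_in_range(0.9, 1.0, 'accuracy', metrics)  # would raise ValueError('accuracy: 0.87 < 0.9.')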
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Experimental API for controlling threading in `tf.data` pipelines.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n\r\nfrom tensorflow.python.data.util import options\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export(\"data.experimental.ThreadingOptions\")\r\nclass ThreadingOptions(options.OptionsBase):\r\n \"\"\"Represents options for dataset threading.\r\n\r\n You can set the threading options of a dataset through the\r\n `experimental_threading` property of `tf.data.Options`; the property is\r\n an instance of `tf.data.experimental.ThreadingOptions`.\r\n\r\n ```python\r\n options = tf.data.Options()\r\n options.experimental_threading.private_threadpool_size = 10\r\n dataset = dataset.with_options(options)\r\n ```\r\n \"\"\"\r\n\r\n max_intra_op_parallelism = options.create_option(\r\n name=\"max_intra_op_parallelism\",\r\n ty=int,\r\n docstring=\r\n \"If set, it overrides the maximum degree of intra-op parallelism.\")\r\n\r\n private_threadpool_size = options.create_option(\r\n name=\"private_threadpool_size\",\r\n ty=int,\r\n docstring=\r\n \"If set, the dataset will use a private threadpool of the given size.\")\r\n", "# Copyright 2018 The TensorFlow Authors. 
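A short sketch combining both threading knobs defined above, assuming the tf.data API shown in the class docstring; the dataset itself is only a placeholder:

import tensorflow as tf

options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1   # cap per-op parallelism
options.experimental_threading.private_threadpool_size = 8    # dedicated threadpool for this dataset
dataset = tf.data.Dataset.range(100).with_options(options)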
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"SequenceFile Dataset.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.hadoop.python.ops import gen_dataset_ops\r\nfrom tensorflow.contrib.hadoop.python.ops import hadoop_op_loader # pylint: disable=unused-import\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_spec\r\nfrom tensorflow.python.util import deprecation\r\n\r\n\r\nclass SequenceFileDataset(dataset_ops.DatasetSource):\r\n \"\"\"A Sequence File Dataset that reads the sequence file.\"\"\"\r\n\r\n @deprecation.deprecated(\r\n None,\r\n \"tf.contrib.hadoop will be removed in 2.0, the support for Apache Hadoop \"\r\n \"will continue to be provided through the tensorflow/io GitHub project.\")\r\n def __init__(self, filenames):\r\n \"\"\"Create a `SequenceFileDataset`.\r\n\r\n `SequenceFileDataset` allows a user to read data from a hadoop sequence\r\n file. A sequence file consists of (key value) pairs sequentially. At\r\n the moment, `org.apache.hadoop.io.Text` is the only serialization type\r\n being supported, and there is no compression support.\r\n\r\n For example:\r\n\r\n ```python\r\n tf.compat.v1.enable_eager_execution()\r\n\r\n dataset = tf.contrib.hadoop.SequenceFileDataset(\"/foo/bar.seq\")\r\n # Prints the (key, value) pairs inside a hadoop sequence file.\r\n for key, value in dataset:\r\n print(key, value)\r\n ```\r\n\r\n Args:\r\n filenames: A `tf.string` tensor containing one or more filenames.\r\n \"\"\"\r\n self._filenames = ops.convert_to_tensor(\r\n filenames, dtype=dtypes.string, name=\"filenames\")\r\n variant_tensor = gen_dataset_ops.sequence_file_dataset(\r\n self._filenames, self._flat_types)\r\n super(SequenceFileDataset, self).__init__(variant_tensor)\r\n\r\n @property\r\n def element_spec(self):\r\n return (tensor_spec.TensorSpec([], dtypes.string),\r\n tensor_spec.TensorSpec([], dtypes.string))\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Operations for automatic batching and unbatching.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.eager import function\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_spec\r\nfrom tensorflow.python.ops import gen_batch_ops\r\n# pylint: disable=wildcard-import\r\nfrom tensorflow.python.ops.gen_batch_ops import *\r\n# pylint: enable=wildcard-import\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export(\"nondifferentiable_batch_function\")\r\ndef batch_function(num_batch_threads,\r\n max_batch_size,\r\n batch_timeout_micros,\r\n allowed_batch_sizes=None,\r\n max_enqueued_batches=10,\r\n autograph=True):\r\n \"\"\"Batches the computation done by the decorated function.\r\n\r\n So, for example, in the following code\r\n\r\n ```python\r\n @batch_function(1, 2, 3)\r\n def layer(a):\r\n return tf.matmul(a, a)\r\n\r\n b = layer(w)\r\n ```\r\n\r\n if more than one session.run call is simultaneously trying to compute `b`\r\n the values of `w` will be gathered, non-deterministically concatenated\r\n along the first axis, and only one thread will run the computation. See the\r\n documentation of the `Batch` op for more details.\r\n\r\n Assumes that all arguments of the decorated function are Tensors which will\r\n be batched along their first dimension.\r\n\r\n SparseTensor is not supported. The return value of the decorated function\r\n must be a Tensor or a list/tuple of Tensors.\r\n\r\n Args:\r\n num_batch_threads: Number of scheduling threads for processing batches\r\n of work. Determines the number of batches processed in parallel.\r\n max_batch_size: Batch sizes will never be bigger than this.\r\n batch_timeout_micros: Maximum number of microseconds to wait before\r\n outputting an incomplete batch.\r\n allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,\r\n does nothing. Otherwise, supplies a list of batch sizes, causing the op\r\n to pad batches up to one of those sizes. The entries must increase\r\n monotonically, and the final entry must equal max_batch_size.\r\n max_enqueued_batches: The maximum depth of the batch queue. 
Defaults to 10.\r\n autograph: Whether to use autograph to compile python and eager style code\r\n for efficient graph-mode execution.\r\n\r\n Returns:\r\n The decorated function will return the unbatched computation output Tensors.\r\n \"\"\"\r\n\r\n def decorator(fn): # pylint: disable=missing-docstring\r\n\r\n def decorated(*args): # pylint: disable=missing-docstring\r\n\r\n @function.defun(autograph=autograph)\r\n def computation(*computation_args):\r\n return fn(*computation_args)\r\n\r\n computation = computation.get_concrete_function(\r\n *[tensor_spec.TensorSpec(dtype=x.dtype, shape=x.shape, name=str(i))\r\n for i, x in enumerate(args)])\r\n\r\n with ops.name_scope(\"batch\") as name:\r\n for a in args:\r\n if not isinstance(a, ops.Tensor):\r\n raise ValueError(\"All arguments to functions decorated with \"\r\n \"`batch_function` are supposed to be Tensors; \"\r\n \"found %s\" % repr(a))\r\n return gen_batch_ops.batch_function(\r\n num_batch_threads=num_batch_threads,\r\n max_batch_size=max_batch_size,\r\n batch_timeout_micros=batch_timeout_micros,\r\n allowed_batch_sizes=allowed_batch_sizes,\r\n max_enqueued_batches=max_enqueued_batches,\r\n shared_name=name,\r\n f=computation,\r\n in_tensors=list(args),\r\n captured_tensors=computation.captured_inputs,\r\n Tout=[o.dtype for o in computation.outputs])\r\n\r\n return decorated\r\n\r\n return decorator\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Functional tests for 3d pooling operations.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.compiler.tests import xla_test\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_nn_ops\r\nfrom tensorflow.python.ops import nn_ops\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\n# Wrapper around AvgPoolGrad that ignores extra arguments needed by\r\n# MaxPoolGrad.\r\ndef _AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding):\r\n del outputs # Unused by average-pooling gradients.\r\n return gen_nn_ops.avg_pool3d_grad(\r\n inputs.get_shape().as_list(),\r\n output_gradients,\r\n ksize=ksize,\r\n strides=strides,\r\n padding=padding)\r\n\r\n\r\nclass Pooling3DTest(xla_test.XLATestCase):\r\n\r\n def _VerifyValues(self, pool_func, input_sizes, window, strides, padding,\r\n expected):\r\n \"\"\"Verifies the output values of the pooling function.\r\n\r\n Args:\r\n pool_func: Function to be called: co.MaxPool, co.AvgPool.\r\n input_sizes: Input tensor dimensions.\r\n window: Tuple of kernel dims: planes, rows, cols.\r\n strides: Tuple of strides for dims: planes, rows, cols.\r\n padding: Padding type.\r\n expected: An array containing 
the expected operation outputs.\r\n \"\"\"\r\n total_size = 1\r\n for s in input_sizes:\r\n total_size *= s\r\n # Initializes the input tensor with array containing incrementing\r\n # numbers from 1.\r\n x = np.arange(1.0, total_size + 1, dtype=np.float32)\r\n x = x.reshape(input_sizes)\r\n with self.session() as sess, self.test_scope():\r\n inputs = array_ops.placeholder(dtypes.float32)\r\n t = pool_func(\r\n inputs,\r\n ksize=[1] + window + [1],\r\n strides=[1] + strides + [1],\r\n padding=padding)\r\n vals = sess.run(t, {inputs: x})\r\n # Verifies values.\r\n actual = vals.flatten()\r\n self.assertAllClose(expected, actual)\r\n\r\n def testAvgPool3dValidPadding(self):\r\n expected_output = [20.5, 21.5, 22.5]\r\n self._VerifyValues(\r\n nn_ops.avg_pool3d,\r\n input_sizes=[1, 3, 3, 3, 3],\r\n window=[2, 2, 2],\r\n strides=[2, 2, 2],\r\n padding=\"VALID\",\r\n expected=expected_output)\r\n\r\n def testAvgPool3dSamePadding(self):\r\n expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]\r\n self._VerifyValues(\r\n nn_ops.avg_pool3d,\r\n input_sizes=[1, 2, 2, 4, 3],\r\n window=[2, 2, 2],\r\n strides=[2, 2, 2],\r\n padding=\"SAME\",\r\n expected=expected_output)\r\n\r\n def testAvgPool3dSamePaddingDifferentStrides(self):\r\n expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]\r\n self._VerifyValues(\r\n nn_ops.avg_pool3d,\r\n input_sizes=[1, 5, 8, 1, 1],\r\n window=[1, 2, 3],\r\n strides=[2, 3, 1],\r\n padding=\"SAME\",\r\n expected=expected_output)\r\n\r\n def testMaxPool3dValidPadding(self):\r\n expected_output = [40.0, 41.0, 42.0]\r\n self._VerifyValues(\r\n nn_ops.max_pool3d,\r\n input_sizes=[1, 3, 3, 3, 3],\r\n window=[2, 2, 2],\r\n strides=[2, 2, 2],\r\n padding=\"VALID\",\r\n expected=expected_output)\r\n\r\n def testMaxPool3dSamePadding(self):\r\n expected_output = [31., 32., 33., 34., 35., 36.]\r\n self._VerifyValues(\r\n nn_ops.max_pool3d,\r\n input_sizes=[1, 2, 2, 3, 3],\r\n window=[2, 2, 2],\r\n strides=[2, 2, 2],\r\n padding=\"SAME\",\r\n expected=expected_output)\r\n\r\n def testMaxPool3dSamePaddingDifferentStrides(self):\r\n expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]\r\n self._VerifyValues(\r\n nn_ops.max_pool3d,\r\n input_sizes=[1, 5, 8, 1, 1],\r\n window=[1, 2, 3],\r\n strides=[2, 3, 1],\r\n padding=\"SAME\",\r\n expected=expected_output)\r\n\r\n # Test pooling on a larger input, with different stride and kernel\r\n # size for the 'z' dimension.\r\n\r\n # Simulate max pooling in numpy to get the expected output.\r\n input_data = np.arange(1, 5 * 27 * 27 * 64 + 1).reshape((5, 27, 27, 64))\r\n input_data = np.pad(input_data, [[0, 0], [0, 1], [0, 1], [0, 0]],\r\n mode=\"constant\")\r\n expected_output = input_data[:, 1::2, 1::2, :]\r\n expected_output[:, -1, :, :] = input_data[:, -2, 1::2, :]\r\n expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]\r\n expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]\r\n\r\n self._VerifyValues(\r\n nn_ops.max_pool3d,\r\n input_sizes=[1, 5, 27, 27, 64],\r\n window=[1, 2, 2],\r\n strides=[1, 2, 2],\r\n padding=\"SAME\",\r\n expected=expected_output.flatten())\r\n\r\n def testKernelSmallerThanStride(self):\r\n self._VerifyValues(\r\n nn_ops.max_pool3d,\r\n input_sizes=[1, 3, 3, 3, 1],\r\n window=[1, 1, 1],\r\n strides=[2, 2, 2],\r\n padding=\"SAME\",\r\n expected=[1, 3, 7, 9, 19, 21, 25, 27])\r\n\r\n self._VerifyValues(\r\n nn_ops.max_pool3d,\r\n input_sizes=[1, 7, 7, 7, 1],\r\n window=[2, 2, 2],\r\n strides=[3, 3, 3],\r\n padding=\"VALID\",\r\n expected=[58, 61, 79, 82, 205, 208, 226, 229])\r\n\r\n 
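    # With a 1x1x1 window the pool over each position is a single element, so
    # stride-2 SAME pooling simply subsamples every other entry of the 3x3x3
    # input 1..27, giving the corner values 1, 3, 7, 9, 19, 21, 25, 27 for both
    # max and average pooling.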
self._VerifyValues(\r\n nn_ops.avg_pool3d,\r\n input_sizes=[1, 3, 3, 3, 1],\r\n window=[1, 1, 1],\r\n strides=[2, 2, 2],\r\n padding=\"SAME\",\r\n expected=[1, 3, 7, 9, 19, 21, 25, 27])\r\n\r\n self._VerifyValues(\r\n nn_ops.avg_pool3d,\r\n input_sizes=[1, 7, 7, 7, 1],\r\n window=[2, 2, 2],\r\n strides=[3, 3, 3],\r\n padding=\"VALID\",\r\n expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])\r\n\r\n def _VerifyGradient(self,\r\n pool_func,\r\n pool_grad_func,\r\n input_sizes,\r\n ksize,\r\n strides,\r\n padding,\r\n pool_grad_grad_func=None):\r\n \"\"\"Verifies the output values of the pooling gradient function.\r\n\r\n Args:\r\n pool_func: Forward pooling function\r\n pool_grad_func: Pooling gradient function for pool_grad_func\r\n input_sizes: Input tensor dimensions.\r\n ksize: The kernel size dimensions\r\n strides: The stride dimensions\r\n padding: Padding type.\r\n pool_grad_grad_func: Second-order gradient function, if available.\r\n \"\"\"\r\n ksize = [1] + ksize + [1]\r\n strides = [1] + strides + [1]\r\n total_size = np.prod(input_sizes)\r\n x = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_sizes)\r\n with self.session() as sess:\r\n # Use the forward pool function to compute some corresponding outputs\r\n # (needed for the CPU device, and we need the shape in both cases).\r\n with ops.device(\"CPU\"):\r\n inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)\r\n outputs = pool_func(\r\n inputs,\r\n ksize=ksize,\r\n strides=strides,\r\n padding=padding)\r\n\r\n output_vals = np.array(sess.run(outputs, {inputs: x}))\r\n output_gradient_vals = np.arange(\r\n 1, output_vals.size + 1, dtype=np.float32)\r\n output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)\r\n output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)\r\n output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)\r\n\r\n # Use the Tensorflow CPU pooling gradient to compute the expected input\r\n # gradients.\r\n with ops.device(\"CPU\"):\r\n output_gradients = array_ops.placeholder(\r\n dtypes.float32, shape=output_vals.shape)\r\n expected_input_gradients = pool_grad_func(\r\n inputs,\r\n outputs,\r\n output_gradients,\r\n ksize=ksize,\r\n strides=strides,\r\n padding=padding)\r\n expected_input_gradient_vals = sess.run(\r\n expected_input_gradients,\r\n {inputs: x,\r\n output_gradients: output_gradient_vals})\r\n\r\n output_grad_gradients = array_ops.placeholder(\r\n dtypes.float32, shape=expected_input_gradient_vals.shape)\r\n if pool_grad_grad_func is not None:\r\n expected_grad_gradients = pool_grad_grad_func(\r\n inputs,\r\n outputs,\r\n output_grad_gradients,\r\n ksize=ksize,\r\n strides=strides,\r\n padding=padding,\r\n data_format=\"NDHWC\")\r\n expected_grad_gradients_vals = sess.run(expected_grad_gradients, {\r\n inputs: x,\r\n output_grad_gradients: output_grad_grad_vals\r\n })\r\n\r\n # Run the gradient op on the XLA device\r\n with self.test_scope():\r\n outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)\r\n actual_input_gradients = pool_grad_func(\r\n inputs,\r\n outputs,\r\n output_gradients,\r\n ksize=ksize,\r\n strides=strides,\r\n padding=padding)\r\n if pool_grad_grad_func is not None:\r\n actual_grad_gradients = pool_grad_grad_func(\r\n inputs,\r\n outputs,\r\n output_grad_gradients,\r\n ksize=ksize,\r\n strides=strides,\r\n padding=padding,\r\n data_format=\"NDHWC\")\r\n\r\n actual = sess.run(actual_input_gradients, {\r\n inputs: x,\r\n outputs: output_vals,\r\n output_gradients: output_gradient_vals\r\n 
})\r\n\r\n # Compare the Tensorflow and XLA results.\r\n self.assertAllClose(\r\n expected_input_gradient_vals.flatten(),\r\n actual.flatten(),\r\n rtol=1e-5,\r\n atol=1e-6)\r\n self.assertShapeEqual(actual, inputs)\r\n\r\n if pool_grad_grad_func is not None:\r\n actual_grad_gradients_vals = sess.run(\r\n actual_grad_gradients, {\r\n inputs: x,\r\n outputs: output_vals,\r\n output_grad_gradients: output_grad_grad_vals\r\n })\r\n\r\n # Compare the Tensorflow and XLA results.\r\n self.assertAllClose(\r\n expected_grad_gradients_vals,\r\n actual_grad_gradients_vals,\r\n rtol=1e-4,\r\n atol=1e-6)\r\n self.assertShapeEqual(actual_grad_gradients_vals, outputs)\r\n\r\n def testMaxPoolGradValidPadding1_1_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.max_pool3d,\r\n gen_nn_ops.max_pool3d_grad,\r\n input_sizes=[1, 3, 3, 3, 1],\r\n ksize=[1, 1, 1],\r\n strides=[1, 1, 1],\r\n padding=\"VALID\",\r\n pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)\r\n\r\n def testMaxPoolGradValidPadding2_1_6_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.max_pool3d,\r\n gen_nn_ops.max_pool3d_grad,\r\n input_sizes=[2, 3, 3, 6, 3],\r\n ksize=[2, 2, 2],\r\n strides=[1, 1, 1],\r\n padding=\"VALID\",\r\n pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)\r\n\r\n def testMaxPoolGradValidPadding2_1_7_3d(self):\r\n # TODO(b/73062247): the bfloat16 implementation of MaxPool3DGradGrad does\r\n # not have enough precision for this test case to pass if\r\n # pool_grad_grad_func is passed.\r\n self._VerifyGradient(\r\n nn_ops.max_pool3d,\r\n gen_nn_ops.max_pool3d_grad,\r\n input_sizes=[2, 3, 5, 7, 3],\r\n ksize=[2, 2, 2],\r\n strides=[1, 1, 1],\r\n padding=\"VALID\")\r\n\r\n def testMaxPoolGradValidPadding2_2_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.max_pool3d,\r\n gen_nn_ops.max_pool3d_grad,\r\n input_sizes=[2, 2, 2, 2, 3],\r\n ksize=[2, 2, 2],\r\n strides=[2, 2, 2],\r\n padding=\"VALID\",\r\n pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)\r\n\r\n def testMaxPoolGradSamePadding1_1_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.max_pool3d,\r\n gen_nn_ops.max_pool3d_grad,\r\n input_sizes=[2, 3, 2, 4, 1],\r\n ksize=[1, 1, 1],\r\n strides=[1, 1, 1],\r\n padding=\"SAME\",\r\n pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)\r\n\r\n def testMaxPoolGradSamePadding2_1_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.max_pool3d,\r\n gen_nn_ops.max_pool3d_grad,\r\n input_sizes=[2, 3, 2, 4, 1],\r\n ksize=[2, 2, 2],\r\n strides=[1, 1, 1],\r\n padding=\"SAME\",\r\n pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)\r\n\r\n def testMaxPoolGradSamePadding2_2_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.max_pool3d,\r\n gen_nn_ops.max_pool3d_grad,\r\n input_sizes=[2, 5, 2, 4, 3],\r\n ksize=[2, 2, 2],\r\n strides=[2, 2, 2],\r\n padding=\"SAME\",\r\n pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)\r\n\r\n def testMaxPoolGradSamePadding3_1_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.max_pool3d,\r\n gen_nn_ops.max_pool3d_grad,\r\n input_sizes=[1, 3, 3, 7, 1],\r\n ksize=[3, 3, 3],\r\n strides=[1, 1, 1],\r\n padding=\"SAME\",\r\n pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)\r\n\r\n def testAvgPoolGradValidPadding1_1_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.avg_pool3d,\r\n _AvgPoolGrad,\r\n input_sizes=[2, 3, 3, 3, 3],\r\n ksize=[1, 1, 1],\r\n strides=[1, 1, 1],\r\n padding=\"VALID\")\r\n\r\n def testAvgPoolGradValidPadding2_1_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.avg_pool3d,\r\n _AvgPoolGrad,\r\n input_sizes=[2, 3, 3, 3, 3],\r\n ksize=[2, 2, 2],\r\n strides=[1, 1, 1],\r\n padding=\"VALID\")\r\n\r\n 
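  # Naming convention for the gradient tests below: the numeric suffixes encode
  # kernel size and stride (e.g. 2_1 = 2x2x2 window with stride 1), mirroring
  # the ksize/strides arguments passed to _VerifyGradient.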
def testAvgPoolGradValidPadding2_2_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.avg_pool3d,\r\n _AvgPoolGrad,\r\n input_sizes=[2, 2, 2, 2, 3],\r\n ksize=[2, 2, 2],\r\n strides=[2, 2, 2],\r\n padding=\"VALID\")\r\n\r\n def testAvgPoolGradSamePadding1_1_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.avg_pool3d,\r\n _AvgPoolGrad,\r\n input_sizes=[2, 3, 2, 4, 3],\r\n ksize=[1, 1, 1],\r\n strides=[1, 1, 1],\r\n padding=\"SAME\")\r\n\r\n def testAvgPoolGradSamePadding2_1_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.avg_pool3d,\r\n _AvgPoolGrad,\r\n input_sizes=[1, 2, 2, 2, 1],\r\n ksize=[2, 2, 2],\r\n strides=[1, 1, 1],\r\n padding=\"SAME\")\r\n\r\n def testAvgPoolGradSamePadding2_2_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.avg_pool3d,\r\n _AvgPoolGrad,\r\n input_sizes=[2, 5, 2, 4, 3],\r\n ksize=[2, 2, 2],\r\n strides=[2, 2, 2],\r\n padding=\"SAME\")\r\n\r\n def testAvgPoolGradSamePadding3_1_3d(self):\r\n self._VerifyGradient(\r\n nn_ops.avg_pool3d,\r\n _AvgPoolGrad,\r\n input_sizes=[1, 3, 6, 7, 1],\r\n ksize=[3, 3, 3],\r\n strides=[1, 1, 1],\r\n padding=\"SAME\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for `tf.data.Dataset.filter_with_legacy_function()`.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.data.kernel_tests import filter_test_base\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\n@test_util.run_v1_only(\"filter_with_legacy_function only available in TF 1.x\")\r\nclass FilterWithLegacyFunctionTest(filter_test_base.FilterTestBase):\r\n\r\n def apply_filter(self, input_dataset, predicate):\r\n return input_dataset.filter_with_legacy_function(predicate)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2018 The TensorFlow Authors. 
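As a sanity check on the 3-D pooling tests above, a small NumPy-only sketch (not part of the corpus file) reproducing the first expected value of testAvgPool3dValidPadding, 20.5:

import numpy as np

x = np.arange(1.0, 82.0).reshape(1, 3, 3, 3, 3)   # same incrementing input the tests build
# A VALID 2x2x2 average pool with stride 2 keeps a single window per channel.
window = x[0, :2, :2, :2, 0]
print(window.mean())   # 20.5, matching expected_output[0]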
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Functions to test TFLite models.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport numpy as np\r\nfrom six import PY3\r\n\r\nfrom google.protobuf import text_format as _text_format\r\nfrom google.protobuf.message import DecodeError\r\nfrom tensorflow.core.framework import graph_pb2 as _graph_pb2\r\nfrom tensorflow.lite.python import convert_saved_model as _convert_saved_model\r\nfrom tensorflow.lite.python import lite as _lite\r\nfrom tensorflow.lite.python import lite_constants as constants\r\nfrom tensorflow.lite.python import util as _util\r\nfrom tensorflow.python import keras as _keras\r\nfrom tensorflow.python.client import session as _session\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework.importer import import_graph_def as _import_graph_def\r\nfrom tensorflow.python.keras.preprocessing import image\r\nfrom tensorflow.python.lib.io import file_io as _file_io\r\nfrom tensorflow.python.platform import resource_loader as _resource_loader\r\nfrom tensorflow.python.saved_model import load as _load\r\nfrom tensorflow.python.saved_model import loader as _loader\r\nfrom tensorflow.python.saved_model import signature_constants as _signature_constants\r\nfrom tensorflow.python.saved_model import tag_constants as _tag_constants\r\n\r\n\r\ndef get_filepath(filename, base_dir=None):\r\n \"\"\"Returns the full path of the filename.\r\n\r\n Args:\r\n filename: Subdirectory and name of the model file.\r\n base_dir: Base directory containing model file.\r\n\r\n Returns:\r\n str.\r\n \"\"\"\r\n if base_dir is None:\r\n base_dir = \"learning/brain/mobile/tflite_compat_models\"\r\n return os.path.join(_resource_loader.get_root_dir_with_all_resources(),\r\n base_dir, filename)\r\n\r\n\r\ndef get_image(size):\r\n \"\"\"Returns an image loaded into an np.ndarray with dims [1, size, size, 3].\r\n\r\n Args:\r\n size: Size of image.\r\n\r\n Returns:\r\n np.ndarray.\r\n \"\"\"\r\n img_filename = _resource_loader.get_path_to_datafile(\r\n \"testdata/grace_hopper.jpg\")\r\n img = image.load_img(img_filename, target_size=(size, size))\r\n img_array = image.img_to_array(img)\r\n img_array = np.expand_dims(img_array, axis=0)\r\n return img_array\r\n\r\n\r\ndef _convert(converter, **kwargs):\r\n \"\"\"Converts the model.\r\n\r\n Args:\r\n converter: TFLiteConverter object.\r\n **kwargs: Additional arguments to be passed into the converter. 
Supported\r\n flags are {\"target_ops\", \"post_training_quantize\", \"quantize_to_float16\"}.\r\n\r\n Returns:\r\n The converted TFLite model in serialized format.\r\n\r\n Raises:\r\n ValueError: Invalid version number.\r\n \"\"\"\r\n if \"target_ops\" in kwargs:\r\n converter.target_spec.supported_ops = kwargs[\"target_ops\"]\r\n if \"post_training_quantize\" in kwargs:\r\n converter.optimizations = [_lite.Optimize.DEFAULT]\r\n if kwargs.get(\"quantize_to_float16\", False):\r\n converter.target_spec.supported_types = [constants.FLOAT16]\r\n return converter.convert()\r\n\r\n\r\ndef _get_input_data_map(tflite_model, input_data):\r\n \"\"\"Generates a map of input data based on the TFLite model.\r\n\r\n Args:\r\n tflite_model: Serialized TensorFlow Lite model.\r\n input_data: List of np.ndarray.\r\n\r\n Returns:\r\n {str: [np.ndarray]}.\r\n \"\"\"\r\n interpreter = _lite.Interpreter(model_content=tflite_model)\r\n interpreter.allocate_tensors()\r\n input_details = interpreter.get_input_details()\r\n return {\r\n input_tensor[\"name\"]: data\r\n for input_tensor, data in zip(input_details, input_data)\r\n }\r\n\r\n\r\ndef _generate_random_input_data(tflite_model, seed=None):\r\n \"\"\"Generates input data based on the input tensors in the TFLite model.\r\n\r\n Args:\r\n tflite_model: Serialized TensorFlow Lite model.\r\n seed: Integer seed for the random generator. (default None)\r\n\r\n Returns:\r\n ([np.ndarray], {str : [np.ndarray]}).\r\n \"\"\"\r\n interpreter = _lite.Interpreter(model_content=tflite_model)\r\n interpreter.allocate_tensors()\r\n input_details = interpreter.get_input_details()\r\n\r\n if seed:\r\n np.random.seed(seed=seed)\r\n input_data = [\r\n np.array(\r\n np.random.random_sample(input_tensor[\"shape\"]),\r\n dtype=input_tensor[\"dtype\"]) for input_tensor in input_details\r\n ]\r\n input_data_map = _get_input_data_map(tflite_model, input_data)\r\n return input_data, input_data_map\r\n\r\n\r\ndef _evaluate_tflite_model(tflite_model, input_data):\r\n \"\"\"Returns evaluation of input data on TFLite model.\r\n\r\n Args:\r\n tflite_model: Serialized TensorFlow Lite model.\r\n input_data: List of np.ndarray.\r\n\r\n Returns:\r\n List of np.ndarray.\r\n \"\"\"\r\n interpreter = _lite.Interpreter(model_content=tflite_model)\r\n interpreter.allocate_tensors()\r\n\r\n input_details = interpreter.get_input_details()\r\n output_details = interpreter.get_output_details()\r\n\r\n for input_tensor, tensor_data in zip(input_details, input_data):\r\n interpreter.set_tensor(input_tensor[\"index\"], tensor_data)\r\n\r\n interpreter.invoke()\r\n output_data = [\r\n interpreter.get_tensor(output_tensor[\"index\"])\r\n for output_tensor in output_details\r\n ]\r\n output_labels = [output_tensor[\"name\"] for output_tensor in output_details]\r\n return output_data, output_labels\r\n\r\n\r\ndef evaluate_frozen_graph(filename, input_arrays, output_arrays):\r\n \"\"\"Returns a function that evaluates the frozen graph on input data.\r\n\r\n Args:\r\n filename: Full filepath of file containing frozen GraphDef.\r\n input_arrays: List of input tensors to freeze graph with.\r\n output_arrays: List of output tensors to freeze graph with.\r\n\r\n Returns:\r\n Lambda function ([np.ndarray data] : [np.ndarray result]).\r\n \"\"\"\r\n with _file_io.FileIO(filename, \"rb\") as f:\r\n file_content = f.read()\r\n\r\n graph_def = _graph_pb2.GraphDef()\r\n try:\r\n graph_def.ParseFromString(file_content)\r\n except (_text_format.ParseError, DecodeError):\r\n if not isinstance(file_content, str):\r\n if 
PY3:\r\n file_content = file_content.decode(\"utf-8\")\r\n else:\r\n file_content = file_content.encode(\"utf-8\")\r\n _text_format.Merge(file_content, graph_def)\r\n\r\n graph = ops.Graph()\r\n with graph.as_default():\r\n _import_graph_def(graph_def, name=\"\")\r\n inputs = _util.get_tensors_from_tensor_names(graph, input_arrays)\r\n outputs = _util.get_tensors_from_tensor_names(graph, output_arrays)\r\n\r\n def run_session(input_data):\r\n with _session.Session(graph=graph) as sess:\r\n return sess.run(outputs, dict(zip(inputs, input_data)))\r\n\r\n return run_session\r\n\r\n\r\ndef evaluate_saved_model(directory, tag_set, signature_key):\r\n \"\"\"Returns a function that evaluates the SavedModel on input data.\r\n\r\n Args:\r\n directory: SavedModel directory to convert.\r\n tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to\r\n analyze. All tags in the tag set must be present.\r\n signature_key: Key identifying SignatureDef containing inputs and outputs.\r\n\r\n Returns:\r\n Lambda function ([np.ndarray data] : [np.ndarray result]).\r\n \"\"\"\r\n with _session.Session().as_default() as sess:\r\n if tag_set is None:\r\n tag_set = set([_tag_constants.SERVING])\r\n if signature_key is None:\r\n signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\r\n\r\n meta_graph = _loader.load(sess, tag_set, directory)\r\n signature_def = _convert_saved_model.get_signature_def(\r\n meta_graph, signature_key)\r\n inputs, outputs = _convert_saved_model.get_inputs_outputs(signature_def)\r\n\r\n return lambda input_data: sess.run(outputs, dict(zip(inputs, input_data)))\r\n\r\n\r\ndef evaluate_keras_model(filename):\r\n \"\"\"Returns a function that evaluates the tf.keras model on input data.\r\n\r\n Args:\r\n filename: Full filepath of HDF5 file containing the tf.keras model.\r\n\r\n Returns:\r\n Lambda function ([np.ndarray data] : [np.ndarray result]).\r\n \"\"\"\r\n keras_model = _keras.models.load_model(filename)\r\n return lambda input_data: [keras_model.predict(input_data)]\r\n\r\n\r\ndef compare_models(tflite_model, tf_eval_func, input_data=None, tolerance=5):\r\n \"\"\"Compares TensorFlow and TFLite models.\r\n\r\n Unless the input data is provided, the models are compared with random data.\r\n\r\n Args:\r\n tflite_model: Serialized TensorFlow Lite model.\r\n tf_eval_func: Lambda function that takes in input data and outputs the\r\n results of the TensorFlow model ([np.ndarray data] : [np.ndarray result]).\r\n input_data: np.ndarray to pass into models during inference. (default None)\r\n tolerance: Decimal place to check accuracy to. (default 5)\r\n \"\"\"\r\n if input_data is None:\r\n input_data, _ = _generate_random_input_data(tflite_model)\r\n tf_results = tf_eval_func(input_data)\r\n tflite_results, _ = _evaluate_tflite_model(tflite_model, input_data)\r\n for tf_result, tflite_result in zip(tf_results, tflite_results):\r\n np.testing.assert_almost_equal(tf_result, tflite_result, tolerance)\r\n\r\n\r\ndef compare_models_v2(tflite_model, tf_eval_func, input_data=None, tolerance=5):\r\n \"\"\"Compares TensorFlow and TFLite models for TensorFlow 2.0.\r\n\r\n Unless the input data is provided, the models are compared with random data.\r\n Currently only 1 input and 1 output are supported by this function.\r\n\r\n Args:\r\n tflite_model: Serialized TensorFlow Lite model.\r\n tf_eval_func: Function to evaluate TensorFlow model. 
Either a lambda\r\n function that takes in input data and outputs the results or a TensorFlow\r\n ConcreteFunction.\r\n input_data: np.ndarray to pass into models during inference. (default None)\r\n tolerance: Decimal place to check accuracy to. (default 5)\r\n \"\"\"\r\n # Convert the input data into a map.\r\n if input_data is None:\r\n input_data, input_data_map = _generate_random_input_data(tflite_model)\r\n else:\r\n input_data_map = _get_input_data_map(tflite_model, input_data)\r\n input_data_func_map = {\r\n input_name: constant_op.constant(input_data)\r\n for input_name, input_data in input_data_map.items()\r\n }\r\n\r\n if len(input_data) > 1:\r\n tf_results = tf_eval_func(**input_data_func_map)\r\n else:\r\n tf_results = tf_eval_func(constant_op.constant(input_data[0]))\r\n tflite_results, tflite_labels = _evaluate_tflite_model(\r\n tflite_model, input_data)\r\n\r\n # Convert the output TensorFlow results into an ordered list.\r\n if isinstance(tf_results, dict):\r\n if len(tf_results) == 1:\r\n tf_results = [tf_results[tf_results.keys()[0]]]\r\n else:\r\n tf_results = [tf_results[tflite_label] for tflite_label in tflite_labels]\r\n\r\n for tf_result, tflite_result in zip(tf_results, tflite_results):\r\n np.testing.assert_almost_equal(tf_result, tflite_result, tolerance)\r\n\r\n\r\ndef test_frozen_graph_quant(filename,\r\n input_arrays,\r\n output_arrays,\r\n input_shapes=None,\r\n **kwargs):\r\n \"\"\"Sanity check to validate post quantize flag alters the graph.\r\n\r\n This test does not check correctness of the converted model. It converts the\r\n TensorFlow frozen graph to TFLite with and without the post_training_quantized\r\n flag. It ensures some tensors have different types between the float and\r\n quantized models in the case of an all TFLite model or mix-and-match model.\r\n It ensures tensor types do not change in the case of an all Flex model.\r\n\r\n Args:\r\n filename: Full filepath of file containing frozen GraphDef.\r\n input_arrays: List of input tensors to freeze graph with.\r\n output_arrays: List of output tensors to freeze graph with.\r\n input_shapes: Dict of strings representing input tensor names to list of\r\n integers representing input shapes (e.g., {\"foo\" : [1, 16, 16, 3]}).\r\n Automatically determined when input shapes is None (e.g., {\"foo\" : None}).\r\n (default None)\r\n **kwargs: Additional arguments to be passed into the converter.\r\n\r\n Raises:\r\n ValueError: post_training_quantize flag doesn't act as intended.\r\n \"\"\"\r\n # Convert and load the float model.\r\n converter = _lite.TFLiteConverter.from_frozen_graph(\r\n filename, input_arrays, output_arrays, input_shapes)\r\n tflite_model_float = _convert(converter, **kwargs)\r\n\r\n interpreter_float = _lite.Interpreter(model_content=tflite_model_float)\r\n interpreter_float.allocate_tensors()\r\n float_tensors = interpreter_float.get_tensor_details()\r\n\r\n # Convert and load the quantized model.\r\n converter = _lite.TFLiteConverter.from_frozen_graph(filename, input_arrays,\r\n output_arrays)\r\n tflite_model_quant = _convert(\r\n converter, post_training_quantize=True, **kwargs)\r\n\r\n interpreter_quant = _lite.Interpreter(model_content=tflite_model_quant)\r\n interpreter_quant.allocate_tensors()\r\n quant_tensors = interpreter_quant.get_tensor_details()\r\n quant_tensors_map = {\r\n tensor_detail[\"name\"]: tensor_detail for tensor_detail in quant_tensors\r\n }\r\n\r\n # Check if weights are of different types in the float and quantized models.\r\n num_tensors_float = 
len(float_tensors)\r\n num_tensors_same_dtypes = sum(\r\n float_tensor[\"dtype\"] == quant_tensors_map[float_tensor[\"name\"]][\"dtype\"]\r\n for float_tensor in float_tensors)\r\n has_quant_tensor = num_tensors_float != num_tensors_same_dtypes\r\n\r\n # For the \"flex\" case, post_training_quantize should not alter the graph,\r\n # unless we are quantizing to float16.\r\n if (\"target_ops\" in kwargs and\r\n not kwargs.get(\"quantize_to_float16\", False) and\r\n set(kwargs[\"target_ops\"]) == set([_lite.OpsSet.SELECT_TF_OPS])):\r\n if has_quant_tensor:\r\n raise ValueError(\"--post_training_quantize flag unexpectedly altered the \"\r\n \"full Flex mode graph.\")\r\n elif not has_quant_tensor:\r\n raise ValueError(\"--post_training_quantize flag was unable to quantize the \"\r\n \"graph as expected in TFLite and mix-and-match mode.\")\r\n\r\n\r\ndef test_frozen_graph(filename,\r\n input_arrays,\r\n output_arrays,\r\n input_shapes=None,\r\n input_data=None,\r\n **kwargs):\r\n \"\"\"Validates the TensorFlow frozen graph converts to a TFLite model.\r\n\r\n Converts the TensorFlow frozen graph to TFLite and checks the accuracy of the\r\n model on random data.\r\n\r\n Args:\r\n filename: Full filepath of file containing frozen GraphDef.\r\n input_arrays: List of input tensors to freeze graph with.\r\n output_arrays: List of output tensors to freeze graph with.\r\n input_shapes: Dict of strings representing input tensor names to list of\r\n integers representing input shapes (e.g., {\"foo\" : [1, 16, 16, 3]}).\r\n Automatically determined when input shapes is None (e.g., {\"foo\" : None}).\r\n (default None)\r\n input_data: np.ndarray to pass into models during inference. (default None)\r\n **kwargs: Additional arguments to be passed into the converter.\r\n \"\"\"\r\n converter = _lite.TFLiteConverter.from_frozen_graph(\r\n filename, input_arrays, output_arrays, input_shapes)\r\n tflite_model = _convert(converter, **kwargs)\r\n\r\n tf_eval_func = evaluate_frozen_graph(filename, input_arrays, output_arrays)\r\n compare_models(tflite_model, tf_eval_func, input_data=input_data)\r\n\r\n\r\ndef test_saved_model(directory,\r\n input_shapes=None,\r\n tag_set=None,\r\n signature_key=None,\r\n input_data=None,\r\n **kwargs):\r\n \"\"\"Validates the TensorFlow SavedModel converts to a TFLite model.\r\n\r\n Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the\r\n model on random data.\r\n\r\n Args:\r\n directory: SavedModel directory to convert.\r\n input_shapes: Dict of strings representing input tensor names to list of\r\n integers representing input shapes (e.g., {\"foo\" : [1, 16, 16, 3]}).\r\n Automatically determined when input shapes is None (e.g., {\"foo\" : None}).\r\n (default None)\r\n tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to\r\n analyze. All tags in the tag set must be present.\r\n signature_key: Key identifying SignatureDef containing inputs and outputs.\r\n input_data: np.ndarray to pass into models during inference. 
(default None)\r\n **kwargs: Additional arguments to be passed into the converter.\r\n \"\"\"\r\n converter = _lite.TFLiteConverter.from_saved_model(\r\n directory,\r\n input_shapes=input_shapes,\r\n tag_set=tag_set,\r\n signature_key=signature_key)\r\n tflite_model = _convert(converter, **kwargs)\r\n\r\n tf_eval_func = evaluate_saved_model(directory, tag_set, signature_key)\r\n compare_models(tflite_model, tf_eval_func, input_data=input_data)\r\n\r\n\r\ndef test_saved_model_v2(directory,\r\n tag_set=None,\r\n signature_key=None,\r\n input_data=None,\r\n **kwargs):\r\n \"\"\"Validates the TensorFlow SavedModel converts to a TFLite model.\r\n\r\n Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the\r\n model on random data.\r\n\r\n Args:\r\n directory: SavedModel directory to convert.\r\n tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to\r\n analyze. All tags in the tag set must be present.\r\n signature_key: Key identifying SignatureDef containing inputs and outputs.\r\n input_data: np.ndarray to pass into models during inference. (default None)\r\n **kwargs: Additional arguments to be passed into the converter.\r\n \"\"\"\r\n model = _load.load(directory, tags=tag_set)\r\n if not signature_key:\r\n signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\r\n concrete_func = model.signatures[signature_key]\r\n\r\n converter = _lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\r\n tflite_model = _convert(converter, **kwargs)\r\n\r\n compare_models_v2(tflite_model, concrete_func, input_data=input_data)\r\n\r\n\r\ndef test_saved_model_v2_quant_float16(directory, **kwargs):\r\n \"\"\"Validates the TensorFlow SavedModel converts to a TFLite model.\"\"\"\r\n\r\n converter = _lite.TFLiteConverterV2.from_saved_model(directory)\r\n tflite_model_float = _convert(converter, version=2, **kwargs)\r\n\r\n interpreter_float = _lite.Interpreter(model_content=tflite_model_float)\r\n interpreter_float.allocate_tensors()\r\n float_tensors = interpreter_float.get_tensor_details()\r\n\r\n tflite_model_quant = _convert(\r\n converter,\r\n version=2,\r\n post_training_quantize=True,\r\n quantize_to_float16=True,\r\n **kwargs)\r\n\r\n interpreter_quant = _lite.Interpreter(model_content=tflite_model_quant)\r\n interpreter_quant.allocate_tensors()\r\n quant_tensors = interpreter_quant.get_tensor_details()\r\n quant_tensors_map = {\r\n tensor_detail[\"name\"]: tensor_detail for tensor_detail in quant_tensors\r\n }\r\n\r\n # Check if weights are of different types in the float and quantized models.\r\n num_tensors_float = len(float_tensors)\r\n num_tensors_same_dtypes = sum(\r\n float_tensor[\"dtype\"] == quant_tensors_map[float_tensor[\"name\"]][\"dtype\"]\r\n for float_tensor in float_tensors)\r\n has_quant_tensor = num_tensors_float != num_tensors_same_dtypes\r\n\r\n if not has_quant_tensor:\r\n raise ValueError(\"--post_training_quantize flag was unable to quantize the \"\r\n \"graph as expected.\")\r\n\r\n\r\ndef test_keras_model(filename,\r\n input_arrays=None,\r\n input_shapes=None,\r\n input_data=None,\r\n **kwargs):\r\n \"\"\"Validates the tf.keras model converts to a TFLite model.\r\n\r\n Converts the tf.keras model to TFLite and checks the accuracy of the model on\r\n random data.\r\n\r\n Args:\r\n filename: Full filepath of HDF5 file containing the tf.keras model.\r\n input_arrays: List of input tensors to freeze graph with.\r\n input_shapes: Dict of strings representing input tensor names to list of\r\n integers representing 
input shapes (e.g., {\"foo\" : [1, 16, 16, 3]}).\r\n Automatically determined when input shapes is None (e.g., {\"foo\" : None}).\r\n (default None)\r\n input_data: np.ndarray to pass into models during inference. (default None)\r\n **kwargs: Additional arguments to be passed into the converter.\r\n \"\"\"\r\n converter = _lite.TFLiteConverter.from_keras_model_file(\r\n filename, input_arrays=input_arrays, input_shapes=input_shapes)\r\n tflite_model = _convert(converter, **kwargs)\r\n\r\n tf_eval_func = evaluate_keras_model(filename)\r\n compare_models(tflite_model, tf_eval_func, input_data=input_data)\r\n\r\n\r\ndef test_keras_model_v2(filename, input_shapes=None, input_data=None, **kwargs):\r\n \"\"\"Validates the tf.keras model converts to a TFLite model.\r\n\r\n Converts the tf.keras model to TFLite and checks the accuracy of the model on\r\n random data.\r\n\r\n Args:\r\n filename: Full filepath of HDF5 file containing the tf.keras model.\r\n input_shapes: List of list of integers representing input shapes in the\r\n order of the tf.keras model's .input attribute (e.g., [[1, 16, 16, 3]]).\r\n (default None)\r\n input_data: np.ndarray to pass into models during inference. (default None)\r\n **kwargs: Additional arguments to be passed into the converter.\r\n \"\"\"\r\n keras_model = _keras.models.load_model(filename)\r\n if input_shapes:\r\n for tensor, shape in zip(keras_model.inputs, input_shapes):\r\n tensor.set_shape(shape)\r\n\r\n converter = _lite.TFLiteConverterV2.from_keras_model(keras_model)\r\n tflite_model = _convert(converter, **kwargs)\r\n\r\n tf_eval_func = evaluate_keras_model(filename)\r\n compare_models_v2(tflite_model, tf_eval_func, input_data=input_data)\r\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for LossScale classes..\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom absl.testing import parameterized\r\nimport numpy as np\r\n\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.distribute import distribution_strategy_context\r\nfrom tensorflow.python.distribute import mirrored_strategy\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import check_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import variables\r\nfrom tensorflow.python.platform import test\r\nfrom tensorflow.python.training.experimental import loss_scale as loss_scale_module\r\n\r\n# TODO(reedwm): Create test case using multiple graphs\r\n\r\n# If called outside any strategy.scope() 
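The helpers above (`test_keras_model`, `test_saved_model_v2`, and friends) all follow the same pattern: convert with a TFLite converter, then compare the TFLite interpreter's output against the original TensorFlow model on random data. The following is a minimal, self-contained sketch of that check using only public `tf.lite` APIs; the toy model architecture and tolerances are illustrative assumptions, not values taken from the library above.

```python
# Sketch: convert a small Keras model to TFLite and compare outputs on
# random input data, mirroring the idea behind compare_models_v2 above.
import numpy as np
import tensorflow as tf

keras_model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation="relu", input_shape=(8,)),
    tf.keras.layers.Dense(2),
])

converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
tflite_model = converter.convert()

interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_detail = interpreter.get_input_details()[0]
output_detail = interpreter.get_output_details()[0]

# Random data, same role as input_data=None in the helpers above.
input_data = np.random.random_sample(
    tuple(input_detail["shape"])).astype(np.float32)
interpreter.set_tensor(input_detail["index"], input_data)
interpreter.invoke()
tflite_out = interpreter.get_tensor(output_detail["index"])

keras_out = keras_model(input_data).numpy()
np.testing.assert_allclose(tflite_out, keras_out, rtol=1e-5, atol=1e-5)
```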
calls, this will return the default\r\n# strategy.\r\ndefault_strategy_fn = distribution_strategy_context.get_strategy\r\n\r\n\r\ndef create_mirrored_strategy():\r\n if context.num_gpus() >= 1:\r\n return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0'])\r\n else:\r\n return mirrored_strategy.MirroredStrategy(['cpu:0'])\r\n\r\n\r\nTESTCASES = ({\r\n 'testcase_name': 'base',\r\n 'strategy_fn': default_strategy_fn\r\n}, {\r\n 'testcase_name': 'distribute',\r\n 'strategy_fn': create_mirrored_strategy\r\n})\r\n\r\n\r\nclass FixedLossScaleTest(test.TestCase):\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_basic(self):\r\n loss_scale_value = 1000\r\n loss_scale = loss_scale_module.FixedLossScale(loss_scale_value)\r\n\r\n update_op, should_apply = loss_scale.update([constant_op.constant(0.)])\r\n self.evaluate(update_op)\r\n # should_apply should be a bool instead of a tensor, so that a tf.cond does\r\n # not have to be built in the graph by the caller.\r\n self.assertIsInstance(should_apply, bool)\r\n self.assertTrue(should_apply)\r\n self.assertEqual(loss_scale_value, self.evaluate(loss_scale()))\r\n\r\n update_op, should_apply = loss_scale.update(\r\n [constant_op.constant(float('NaN'))])\r\n self.evaluate(update_op)\r\n self.assertIsInstance(should_apply, bool)\r\n self.assertTrue(should_apply)\r\n self.assertEqual(loss_scale_value, self.evaluate(loss_scale()))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_serialization(self):\r\n loss_scale = loss_scale_module.get(123)\r\n config = loss_scale.get_config()\r\n loss_scale = loss_scale_module.FixedLossScale.from_config(config)\r\n self.assertEqual(self.evaluate(loss_scale()), 123.)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_call_type(self):\r\n scalar = loss_scale_module.FixedLossScale(123)\r\n self.assertIsInstance(scalar(), ops.Tensor)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_repr(self):\r\n loss_scale = loss_scale_module.FixedLossScale(123)\r\n self.assertEqual(repr(loss_scale), 'FixedLossScale(123.0)')\r\n\r\n\r\ndef _get_example_iter(inputs):\r\n dataset = dataset_ops.Dataset.from_tensor_slices(inputs)\r\n return dataset_ops.make_one_shot_iterator(dataset)\r\n\r\n\r\nclass DynamicLossScaleTest(test.TestCase, parameterized.TestCase):\r\n\r\n def _get_tensor(self, is_finite):\r\n tensor = control_flow_ops.cond(is_finite, lambda: 1., lambda: float('NaN'))\r\n\r\n if not distribution_strategy_context.has_strategy():\r\n return tensor\r\n\r\n def get():\r\n rep_id = (\r\n distribution_strategy_context.get_replica_context()\r\n .replica_id_in_sync_group)\r\n return control_flow_ops.cond(\r\n math_ops.equal(rep_id, 0), lambda: tensor, lambda: 1.)\r\n\r\n distribution = distribution_strategy_context.get_strategy()\r\n return distribution.extended.call_for_each_replica(get)\r\n\r\n def _test_helper(self,\r\n inputs,\r\n expected_outputs,\r\n initial_loss_scale=1.,\r\n increment_period=2,\r\n multiplier=2):\r\n loss_scale = loss_scale_module.DynamicLossScale(\r\n initial_loss_scale=initial_loss_scale,\r\n increment_period=increment_period,\r\n multiplier=multiplier)\r\n itr = _get_example_iter(inputs)\r\n\r\n def update():\r\n is_finite = itr.get_next()\r\n grad = self._get_tensor(is_finite)\r\n update_op, should_apply_gradients = loss_scale.update([grad])\r\n assert_op = check_ops.assert_equal(should_apply_gradients, is_finite)\r\n if context.executing_eagerly():\r\n return\r\n with ops.control_dependencies([assert_op]):\r\n return array_ops.identity(update_op)\r\n\r\n 
actual_outputs = []\r\n\r\n if not context.executing_eagerly():\r\n update_op = update()\r\n self.evaluate(variables.global_variables_initializer())\r\n for _ in range(len(inputs)):\r\n if context.executing_eagerly():\r\n update()\r\n else:\r\n self.evaluate(update_op)\r\n actual_outputs.append(self.evaluate(loss_scale()))\r\n self.assertEqual(actual_outputs, expected_outputs)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_increase(self, strategy_fn):\r\n with strategy_fn().scope():\r\n inputs = [True] * 6\r\n expected_outputs = [1, 2, 2, 4, 4, 8]\r\n self._test_helper(inputs, expected_outputs)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_keep_increasing_until_capped(self, strategy_fn):\r\n with strategy_fn().scope():\r\n init_loss_scale = np.finfo(np.float32).max / 4\r\n max_float = np.finfo(np.float32).max\r\n\r\n inputs = [True] * 6\r\n # Output is capped the 2nd time it doubles.\r\n expected_outputs = [\r\n init_loss_scale, init_loss_scale * 2, init_loss_scale * 2, max_float,\r\n max_float, max_float\r\n ]\r\n\r\n self._test_helper(inputs, expected_outputs, init_loss_scale)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_decrease_every_step(self, strategy_fn):\r\n with strategy_fn().scope():\r\n inputs = [False] * 6\r\n init_loss_scale = 1024\r\n expected_outputs = [512, 256, 128, 64, 32, 16]\r\n\r\n self._test_helper(inputs, expected_outputs, init_loss_scale)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_keep_decreasing_until_one(self, strategy_fn):\r\n with strategy_fn().scope():\r\n inputs = [False] * 6\r\n init_loss_scale = 16\r\n expected_outputs = [8, 4, 2, 1, 1, 1]\r\n\r\n self._test_helper(inputs, expected_outputs, init_loss_scale)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_nan_clear_good_step(self, strategy_fn):\r\n with strategy_fn().scope():\r\n inputs = [True, True, True, False, True]\r\n expected_outputs = [1, 2, 2, 1, 1]\r\n self._test_helper(inputs, expected_outputs)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_trigger_loss_scale_update_each_step(self, strategy_fn):\r\n with strategy_fn().scope():\r\n init_loss_scale = 1\r\n increment_period = 1\r\n\r\n inputs = [True] * 3 + [False, True, True]\r\n expected_outputs = [2, 4, 8, 4, 8, 16]\r\n\r\n self._test_helper(inputs, expected_outputs, init_loss_scale,\r\n increment_period)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_alternating_good_and_bad_gradients_trigger_each_step(\r\n self, strategy_fn):\r\n with strategy_fn().scope():\r\n init_loss_scale = 1\r\n increment_period = 1\r\n\r\n inputs = [True, False] * 4 + [True]\r\n expected_outputs = [2, 1, 2, 1, 2, 1, 2, 1, 2]\r\n self._test_helper(inputs, expected_outputs, init_loss_scale,\r\n increment_period)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_alternating_good_and_bad_gradients_trigger_every_other_step(\r\n self, strategy_fn):\r\n with strategy_fn().scope():\r\n init_loss_scale = 32\r\n increment_period = 2\r\n\r\n inputs = [True, False] * 3 + [True]\r\n expected_outputs = [32, 16, 16, 8, 8, 4, 4]\r\n self._test_helper(inputs, expected_outputs, init_loss_scale,\r\n 
increment_period)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_nondefault_multiplier(self, strategy_fn):\r\n with strategy_fn().scope():\r\n init_loss_scale = 4\r\n multiplier = 3\r\n inputs = [True, True, False, True, True]\r\n expected_outputs = [4, 12, 4, 4, 12]\r\n self._test_helper(\r\n inputs, expected_outputs, init_loss_scale, multiplier=multiplier)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_random_mix_good_and_bad_gradients(self, strategy_fn):\r\n with strategy_fn().scope():\r\n init_loss_scale = 4\r\n inputs = [\r\n False, True, True, True, False, True, False, True, True, True, False\r\n ]\r\n expected_outputs = [2, 2, 4, 4, 2, 2, 1, 1, 2, 2, 1]\r\n self._test_helper(inputs, expected_outputs, init_loss_scale)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_single_tensor_gradient(self, strategy_fn):\r\n with strategy_fn().scope():\r\n loss_scale = loss_scale_module.DynamicLossScale()\r\n grad = constant_op.constant(4.0)\r\n _, should_apply = loss_scale.update(grad)\r\n self.assertTrue(self.evaluate(should_apply))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_serialization(self):\r\n loss_scale = loss_scale_module.DynamicLossScale(\r\n initial_loss_scale=1, increment_period=2, multiplier=3)\r\n config = loss_scale.get_config()\r\n loss_scale = loss_scale_module.DynamicLossScale.from_config(config)\r\n self.evaluate(variables.global_variables_initializer())\r\n self.assertEqual(self.evaluate(loss_scale()), 1)\r\n self.assertEqual(loss_scale.increment_period, 2)\r\n self.assertEqual(loss_scale.multiplier, 3)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_update_with_none_gradients(self):\r\n loss_scale = loss_scale_module.DynamicLossScale()\r\n loss_scale.update([None])\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_get(self):\r\n scalar = loss_scale_module.get('dynamic')\r\n scalar2 = loss_scale_module.DynamicLossScale()\r\n self.assertEqual(scalar.initial_loss_scale, scalar2.initial_loss_scale)\r\n self.assertEqual(scalar.increment_period, scalar2.increment_period)\r\n self.assertEqual(scalar.multiplier, scalar2.multiplier)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_call_type(self):\r\n scalar = loss_scale_module.DynamicLossScale()\r\n self.assertIsInstance(scalar(), ops.Tensor)\r\n\r\n @parameterized.named_parameters(*TESTCASES)\r\n @test_util.run_in_graph_and_eager_modes\r\n def test_repr(self, strategy_fn):\r\n with strategy_fn().scope():\r\n loss_scale = loss_scale_module.DynamicLossScale(\r\n initial_loss_scale=1, increment_period=2, multiplier=3)\r\n if context.executing_eagerly():\r\n self.assertEqual(repr(loss_scale),\r\n 'DynamicLossScale(current_loss_scale=1.0, '\r\n 'num_good_steps=0, initial_loss_scale=1.0, '\r\n 'increment_period=2, multiplier=3.0)')\r\n else:\r\n self.assertEqual(repr(loss_scale),\r\n 'DynamicLossScale(initial_loss_scale=1.0, '\r\n 'increment_period=2, multiplier=3.0)')\r\n\r\n\r\nif __name__ == '__main__':\r\n test.main()\r\n", "# Copyright 2016 The TensorFlow Authors. 
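The expected outputs in the tests above encode the dynamic loss-scale policy: each step with all-finite gradients increments a good-step counter, the scale is multiplied once the counter reaches `increment_period`, and any non-finite gradient halves the scale (never below 1) and resets the counter. The snippet below is a pure-Python sketch of that rule (not the TensorFlow implementation; it also omits the float32-max cap exercised by `test_keep_increasing_until_capped`), checked against the values from `test_increase`.

```python
# Pure-Python simulation of the DynamicLossScale update rule the tests check.
def simulate_dynamic_loss_scale(is_finite_steps, initial_loss_scale=1.0,
                                increment_period=2, multiplier=2.0):
    scale = float(initial_loss_scale)
    good_steps = 0
    history = []
    for finite in is_finite_steps:
        if finite:
            good_steps += 1
            if good_steps >= increment_period:
                scale *= multiplier          # enough good steps: grow the scale
                good_steps = 0
        else:
            scale = max(scale / multiplier, 1.0)  # non-finite grads: shrink, floor at 1
            good_steps = 0
        history.append(scale)
    return history

# Matches test_increase: six finite steps with the default parameters.
assert simulate_dynamic_loss_scale([True] * 6) == [1, 2, 2, 4, 4, 8]
# Matches test_nan_clear_good_step: a NaN step resets the good-step counter.
assert simulate_dynamic_loss_scale([True, True, True, False, True]) == [1, 2, 2, 1, 1]
```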
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Ops and modules related to resampler.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n\r\n# pylint: disable=wildcard-import\r\nfrom tensorflow.contrib.resampler.python.ops.resampler_ops import *\r\nfrom tensorflow.python.util.all_util import remove_undocumented\r\n\r\nremove_undocumented(__name__, [\"resampler\"])\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Class implementing a single machine parameter server strategy.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.distribute import device_util\r\nfrom tensorflow.python.distribute import distribute_lib\r\nfrom tensorflow.python.distribute import parameter_server_strategy\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n@tf_export(\"distribute.experimental.CentralStorageStrategy\", v1=[])\r\nclass CentralStorageStrategy(distribute_lib.Strategy):\r\n \"\"\"A one-machine strategy that puts all variables on a single device.\r\n\r\n Variables are assigned to local CPU or the only GPU. 
If there is more\r\n than one GPU, compute operations (other than variable update operations)\r\n will be replicated across all GPUs.\r\n\r\n For Example:\r\n ```\r\n strategy = tf.distribute.experimental.CentralStorageStrategy()\r\n # Create a dataset\r\n ds = tf.data.Dataset.range(5).batch(2)\r\n # Distribute that dataset\r\n dist_dataset = strategy.experimental_distribute_dataset(ds)\r\n\r\n with strategy.scope():\r\n @tf.function\r\n def train_step(val):\r\n return val + 1\r\n\r\n # Iterate over the distributed dataset\r\n for x in dist_dataset:\r\n # process dataset elements\r\n strategy.experimental_run_v2(train_step, args=(x,))\r\n ```\r\n \"\"\"\r\n\r\n def __init__(self, compute_devices=None, parameter_device=None):\r\n extended = parameter_server_strategy.ParameterServerStrategyExtended(\r\n self,\r\n compute_devices=compute_devices,\r\n parameter_device=parameter_device)\r\n \"\"\"Initializes the strategy with optional device strings.\r\n\r\n Args:\r\n compute_devices: an optional list of strings for device to replicate models\r\n on. If this is not provided, all local GPUs will be used; if there is no\r\n GPU, local CPU will be used.\r\n parameter_device: an optional device string for which device to put\r\n variables on. The default one is CPU or GPU if there is only one.\r\n \"\"\"\r\n super(CentralStorageStrategy, self).__init__(extended)\r\n\r\n @classmethod\r\n def _from_num_gpus(cls, num_gpus):\r\n return cls(device_util.local_devices_from_num_gpus(num_gpus))\r\n\r\n def experimental_distribute_dataset(self, dataset): # pylint: disable=useless-super-delegation\r\n \"\"\"Distributes a tf.data.Dataset instance provided via dataset.\r\n\r\n The returned dataset is a wrapped strategy dataset which creates a\r\n multidevice iterator under the hood. It prefetches the input data to the\r\n specified devices on the worker. The returned distributed dataset can be\r\n iterated over similar to how regular datasets can.\r\n\r\n NOTE: Currently, the user cannot add any more transformations to a\r\n distributed dataset.\r\n\r\n For Example:\r\n ```\r\n strategy = tf.distribute.CentralStorageStrategy() # with 1 CPU and 1 GPU\r\n dataset = tf.data.Dataset.range(10).batch(2)\r\n dist_dataset = strategy.experimental_distribute_dataset(dataset)\r\n for x in dist_dataset:\r\n print(x) # Prints PerReplica values [0, 1], [2, 3],...\r\n\r\n ```\r\n Args:\r\n dataset: `tf.data.Dataset` to be prefetched to device.\r\n\r\n Returns:\r\n A \"distributed `Dataset`\" that the caller can iterate over.\r\n \"\"\"\r\n return super(CentralStorageStrategy, self).experimental_distribute_dataset(\r\n dataset)\r\n\r\n def experimental_distribute_datasets_from_function(self, dataset_fn): # pylint: disable=useless-super-delegation\r\n \"\"\"Distributes `tf.data.Dataset` instances created by calls to `dataset_fn`.\r\n\r\n `dataset_fn` will be called once for each worker in the strategy. In this\r\n case, we only have one worker so `dataset_fn` is called once. 
Each replica\r\n on this worker will then dequeue a batch of elements from this local\r\n dataset.\r\n\r\n The `dataset_fn` should take an `tf.distribute.InputContext` instance where\r\n information about batching and input replication can be accessed.\r\n\r\n For Example:\r\n ```\r\n def dataset_fn(input_context):\r\n batch_size = input_context.get_per_replica_batch_size(global_batch_size)\r\n d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)\r\n return d.shard(\r\n input_context.num_input_pipelines, input_context.input_pipeline_id)\r\n\r\n inputs = strategy.experimental_distribute_datasets_from_function(dataset_fn)\r\n\r\n for batch in inputs:\r\n replica_results = strategy.experimental_run_v2(replica_fn, args=(batch,))\r\n ```\r\n\r\n IMPORTANT: The `tf.data.Dataset` returned by `dataset_fn` should have a\r\n per-replica batch size, unlike `experimental_distribute_dataset`, which uses\r\n the global batch size. This may be computed using\r\n `input_context.get_per_replica_batch_size`.\r\n\r\n Args:\r\n dataset_fn: A function taking a `tf.distribute.InputContext` instance and\r\n returning a `tf.data.Dataset`.\r\n\r\n Returns:\r\n A \"distributed `Dataset`\", which the caller can iterate over like regular\r\n datasets.\r\n \"\"\"\r\n return super(\r\n CentralStorageStrategy,\r\n self).experimental_distribute_datasets_from_function(dataset_fn)\r\n\r\n def experimental_local_results(self, value): # pylint: disable=useless-super-delegation\r\n \"\"\"Returns the list of all local per-replica values contained in `value`.\r\n\r\n In `CentralStorageStrategy` there is a single worker so the value returned\r\n will be all the values on that worker.\r\n\r\n Args:\r\n value: A value returned by `experimental_run()`, `experimental_run_v2()`,\r\n `extended.call_for_each_replica()`, or a variable created in `scope`.\r\n\r\n Returns:\r\n A tuple of values contained in `value`. If `value` represents a single\r\n value, this returns `(value,).`\r\n \"\"\"\r\n return super(CentralStorageStrategy, self).experimental_local_results(value)\r\n\r\n def experimental_run_v2(self, fn, args=(), kwargs=None): # pylint: disable=useless-super-delegation\r\n \"\"\"Run `fn` on each replica, with the given arguments.\r\n\r\n In `CentralStorageStrategy`, `fn` is called on each of the compute\r\n replicas, with the provided \"per replica\" arguments specific to that device.\r\n\r\n Args:\r\n fn: The function to run. The output must be a `tf.nest` of `Tensor`s.\r\n args: (Optional) Positional arguments to `fn`.\r\n kwargs: (Optional) Keyword arguments to `fn`.\r\n\r\n Returns:\r\n Return value from running `fn`.\r\n \"\"\"\r\n return super(CentralStorageStrategy, self).experimental_run_v2(fn, args,\r\n kwargs)\r\n\r\n def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation\r\n \"\"\"Reduce `value` across replicas.\r\n\r\n Given a per-replica value returned by `experimental_run_v2`, say a\r\n per-example loss, the batch will be divided across all the replicas. This\r\n function allows you to aggregate across replicas and optionally also across\r\n batch elements. For example, if you have a global batch size of 8 and 2\r\n replicas, values for examples `[0, 1, 2, 3]` will be on replica 0 and\r\n `[4, 5, 6, 7]` will be on replica 1. By default, `reduce` will just\r\n aggregate across replicas, returning `[0+4, 1+5, 2+6, 3+7]`. This is useful\r\n when each replica is computing a scalar or some other value that doesn't\r\n have a \"batch\" dimension (like a gradient). 
More often you will want to\r\n aggregate across the global batch, which you can get by specifying the batch\r\n dimension as the `axis`, typically `axis=0`. In this case it would return a\r\n scalar `0+1+2+3+4+5+6+7`.\r\n\r\n If there is a last partial batch, you will need to specify an axis so\r\n that the resulting shape is consistent across replicas. So if the last\r\n batch has size 6 and it is divided into [0, 1, 2, 3] and [4, 5], you\r\n would get a shape mismatch unless you specify `axis=0`. If you specify\r\n `tf.distribute.ReduceOp.MEAN`, using `axis=0` will use the correct\r\n denominator of 6. Contrast this with computing `reduce_mean` to get a\r\n scalar value on each replica and this function to average those means,\r\n which will weigh some values `1/8` and others `1/4`.\r\n\r\n For Example:\r\n ```\r\n strategy = tf.distribute.experimental.CentralStorageStrategy(\r\n compute_devices=['CPU:0', 'GPU:0'], parameter_device='CPU:0')\r\n ds = tf.data.Dataset.range(10)\r\n # Distribute that dataset\r\n dist_dataset = strategy.experimental_distribute_dataset(ds)\r\n\r\n with strategy.scope():\r\n @tf.function\r\n def train_step(val):\r\n # pass through\r\n return val\r\n\r\n # Iterate over the distributed dataset\r\n for x in dist_dataset:\r\n result = strategy.experimental_run_v2(train_step, args=(x,))\r\n\r\n result = strategy.reduce(tf.distribute.ReduceOp.SUM, result,\r\n axis=None).numpy()\r\n # result: array([ 4, 6, 8, 10])\r\n\r\n result = strategy.reduce(tf.distribute.ReduceOp.SUM, result, axis=0).numpy()\r\n # result: 28\r\n ```\r\n\r\n Args:\r\n reduce_op: A `tf.distribute.ReduceOp` value specifying how values should\r\n be combined.\r\n value: A \"per replica\" value, e.g. returned by `experimental_run_v2` to\r\n be combined into a single tensor.\r\n axis: Specifies the dimension to reduce along within each\r\n replica's tensor. Should typically be set to the batch dimension, or\r\n `None` to only reduce across replicas (e.g. if the tensor has no batch\r\n dimension).\r\n\r\n Returns:\r\n A `Tensor`.\r\n \"\"\"\r\n return super(CentralStorageStrategy, self).reduce(reduce_op, value, axis)\r\n\r\n\r\n@tf_export(v1=[\"distribute.experimental.CentralStorageStrategy\"]) # pylint: disable=missing-docstring\r\nclass CentralStorageStrategyV1(distribute_lib.StrategyV1):\r\n\r\n __doc__ = CentralStorageStrategy.__doc__\r\n\r\n def __init__(self, compute_devices=None, parameter_device=None):\r\n super(CentralStorageStrategyV1, self).__init__(\r\n parameter_server_strategy.ParameterServerStrategyExtended(\r\n self,\r\n compute_devices=compute_devices,\r\n parameter_device=parameter_device))\r\n __init__.__doc__ = CentralStorageStrategy.__init__.__doc__\r\n", "# Copyright 2018 The TensorFlow Authors. 
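As a runnable variant of the `reduce` example from the docstring above, the sketch below distributes a small dataset, runs a pass-through step on each replica, and sums each step's result over the batch dimension and across replicas. Which devices the strategy picks up is an assumption about the local machine; with no GPU it simply runs on the local CPU.

```python
# Sketch: CentralStorageStrategy usage following the class docstrings above.
import tensorflow as tf

strategy = tf.distribute.experimental.CentralStorageStrategy()

ds = tf.data.Dataset.range(8).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(ds)

@tf.function
def train_step(val):
    return val + 1

with strategy.scope():
    for x in dist_dataset:
        per_replica = strategy.experimental_run_v2(train_step, args=(x,))
        # axis=0 sums over the batch dimension of each replica's result as
        # well as across replicas, yielding one scalar per step.
        total = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=0)
        print(total.numpy())
```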
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom tensorflow.compiler.tests import xla_test\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_array_ops\r\nfrom tensorflow.python.platform import googletest\r\n\r\n\r\nclass FakeQuantWithMinMaxArgsTest(xla_test.XLATestCase):\r\n \"\"\"Test cases for FakeQuantWithMinMaxArgs operation.\"\"\"\r\n\r\n # 8 bits, wide range.\r\n def testOp_with8BitsNoScalingNoNudging(self):\r\n self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)\r\n\r\n def testOp_with8BitsScalingAndNudgingDown(self):\r\n self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)\r\n\r\n def testOp_with8BitsScalingAndNudgingUp(self):\r\n self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)\r\n\r\n def testOp_with8BitsScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)\r\n\r\n # 8 bits, narrow range.\r\n def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):\r\n self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):\r\n self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):\r\n self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)\r\n\r\n # 7 bits, wide range.\r\n def testOp_with7BitsNoScalingNoNudging(self):\r\n self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)\r\n\r\n def testOp_with7BitsScalingAndNudgingDown(self):\r\n self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)\r\n\r\n def testOp_with7BitsScalingAndNudgingUp(self):\r\n self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)\r\n\r\n def testOp_with7BitsScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)\r\n\r\n # 7 bits, narrow range.\r\n def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):\r\n self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):\r\n self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):\r\n self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)\r\n\r\n def _TestOp(self, input_min, input_max, num_bits, narrow_range,\r\n expected_nudged_input_min, expected_nudged_input_max,\r\n expected_step):\r\n inputs = np.array(\r\n [\r\n expected_nudged_input_min - expected_step,\r\n expected_nudged_input_min - 0.01, expected_nudged_input_min,\r\n expected_nudged_input_min + 0.01,\r\n 
expected_nudged_input_min + expected_step - 0.01,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_min + expected_step + 0.01,\r\n expected_nudged_input_max - 0.01, expected_nudged_input_max,\r\n expected_nudged_input_max + 0.01,\r\n expected_nudged_input_max + expected_step\r\n ],\r\n dtype=np.float32)\r\n expected = np.array(\r\n [\r\n expected_nudged_input_min, expected_nudged_input_min,\r\n expected_nudged_input_min, expected_nudged_input_min,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_max, expected_nudged_input_max,\r\n expected_nudged_input_max, expected_nudged_input_max\r\n ],\r\n dtype=np.float32)\r\n\r\n with self.session() as session:\r\n with self.test_scope():\r\n input_placeholder = array_ops.placeholder(\r\n dtypes.float32, inputs.shape, name=\"inputs\")\r\n outputs = array_ops.fake_quant_with_min_max_args(\r\n input_placeholder,\r\n min=input_min,\r\n max=input_max,\r\n num_bits=num_bits,\r\n narrow_range=narrow_range)\r\n result = session.run(outputs, {input_placeholder: inputs})\r\n self.assertAllCloseAccordingToType(\r\n result, expected, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)\r\n\r\n\r\nclass FakeQuantWithMinMaxArgsGradientTest(xla_test.XLATestCase):\r\n \"\"\"Test cases for FakeQuantWithMinMaxArgsGradient operation.\"\"\"\r\n\r\n # 8 bits, wide range.\r\n def testOp_with8BitsNoScalingNoNudging(self):\r\n self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)\r\n\r\n def testOp_with8BitsScalingAndNudgingDown(self):\r\n self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)\r\n\r\n def testOp_with8BitsScalingAndNudgingUp(self):\r\n self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)\r\n\r\n def testOp_with8BitsScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)\r\n\r\n # 8 bits, narrow range.\r\n def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):\r\n self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):\r\n self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):\r\n self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)\r\n\r\n # 7 bits, wide range.\r\n def testOp_with7BitsNoScalingNoNudging(self):\r\n self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)\r\n\r\n def testOp_with7BitsScalingAndNudgingDown(self):\r\n self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)\r\n\r\n def testOp_with7BitsScalingAndNudgingUp(self):\r\n self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)\r\n\r\n def testOp_with7BitsScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)\r\n\r\n # 7 bits, narrow range.\r\n def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):\r\n self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):\r\n self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):\r\n self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)\r\n\r\n def _TestOp(self, input_min, input_max, num_bits, narrow_range,\r\n expected_nudged_input_min, expected_nudged_input_max,\r\n 
expected_step):\r\n inputs = np.array(\r\n [\r\n expected_nudged_input_min - expected_step,\r\n expected_nudged_input_min - 0.01, expected_nudged_input_min,\r\n expected_nudged_input_min + 0.01,\r\n expected_nudged_input_min + expected_step - 0.01,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_min + expected_step + 0.01,\r\n expected_nudged_input_max - 0.01, expected_nudged_input_max,\r\n expected_nudged_input_max + 0.01,\r\n expected_nudged_input_max + expected_step\r\n ],\r\n dtype=np.float32)\r\n gradients = np.arange(1, len(inputs) + 1, dtype=np.float32)\r\n expected_backprops = np.array(\r\n [0.0, 0.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, 0.0],\r\n dtype=np.float32)\r\n\r\n with self.session() as session:\r\n with self.test_scope():\r\n gradient_placeholder = array_ops.placeholder(\r\n dtypes.float32, gradients.shape, name=\"gradients\")\r\n input_placeholder = array_ops.placeholder(\r\n dtypes.float32, inputs.shape, name=\"inputs\")\r\n outputs = gen_array_ops.fake_quant_with_min_max_args_gradient(\r\n gradient_placeholder,\r\n input_placeholder,\r\n min=input_min,\r\n max=input_max,\r\n num_bits=num_bits,\r\n narrow_range=narrow_range)\r\n backprops = session.run(outputs, {\r\n gradient_placeholder: gradients,\r\n input_placeholder: inputs\r\n })\r\n self.assertAllCloseAccordingToType(\r\n backprops,\r\n expected_backprops,\r\n rtol=1e-3,\r\n atol=1e-5,\r\n bfloat16_rtol=0.03)\r\n\r\n\r\nclass FakeQuantWithMinMaxVarsTest(xla_test.XLATestCase):\r\n \"\"\"Test cases for FakeQuantWithMinMaxVars operation.\"\"\"\r\n\r\n # 8 bits, wide range.\r\n def testOp_with8BitsNoScalingNoNudging(self):\r\n self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)\r\n\r\n def testOp_with8BitsScalingAndNudgingDown(self):\r\n self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)\r\n\r\n def testOp_with8BitsScalingAndNudgingUp(self):\r\n self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)\r\n\r\n def testOp_with8BitsScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)\r\n\r\n # 8 bits, narrow range.\r\n def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):\r\n self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):\r\n self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):\r\n self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)\r\n\r\n # 7 bits, wide range.\r\n def testOp_with7BitsNoScalingNoNudging(self):\r\n self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)\r\n\r\n def testOp_with7BitsScalingAndNudgingDown(self):\r\n self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)\r\n\r\n def testOp_with7BitsScalingAndNudgingUp(self):\r\n self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)\r\n\r\n def testOp_with7BitsScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)\r\n\r\n # 7 bits, narrow range.\r\n def testOp_with7BitsNarrowRangeNoScalingNoNudging(self):\r\n self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):\r\n self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):\r\n self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 62.9, 7, True, 0.0, 
63.0, 0.5)\r\n\r\n def _TestOp(self, input_min, input_max, num_bits, narrow_range,\r\n expected_nudged_input_min, expected_nudged_input_max,\r\n expected_step):\r\n inputs = np.array(\r\n [\r\n expected_nudged_input_min - expected_step,\r\n expected_nudged_input_min - 0.01, expected_nudged_input_min,\r\n expected_nudged_input_min + 0.01,\r\n expected_nudged_input_min + expected_step - 0.01,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_min + expected_step + 0.01,\r\n expected_nudged_input_max - 0.01, expected_nudged_input_max,\r\n expected_nudged_input_max + 0.01,\r\n expected_nudged_input_max + expected_step\r\n ],\r\n dtype=np.float32)\r\n expected = np.array(\r\n [\r\n expected_nudged_input_min, expected_nudged_input_min,\r\n expected_nudged_input_min, expected_nudged_input_min,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_max, expected_nudged_input_max,\r\n expected_nudged_input_max, expected_nudged_input_max\r\n ],\r\n dtype=np.float32)\r\n\r\n with self.session() as session:\r\n with self.test_scope():\r\n input_placeholder = array_ops.placeholder(\r\n dtypes.float32, inputs.shape, name=\"inputs\")\r\n min_placeholder = array_ops.placeholder(dtypes.float32, (), name=\"min\")\r\n max_placeholder = array_ops.placeholder(dtypes.float32, (), name=\"max\")\r\n outputs = array_ops.fake_quant_with_min_max_vars(\r\n input_placeholder,\r\n min_placeholder,\r\n max_placeholder,\r\n num_bits=num_bits,\r\n narrow_range=narrow_range)\r\n result = session.run(\r\n outputs, {\r\n input_placeholder: inputs,\r\n min_placeholder: input_min,\r\n max_placeholder: input_max\r\n })\r\n self.assertAllCloseAccordingToType(\r\n result, expected, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)\r\n\r\n\r\nclass FakeQuantWithMinMaxVarsGradientTest(xla_test.XLATestCase):\r\n \"\"\"Test cases for FakeQuantWithMinMaxVarsGradient operation.\"\"\"\r\n\r\n # 8 bits, wide range.\r\n def testOp_with8BitsNoScalingNoNudging(self):\r\n self._TestOp(0.0, 255.0, 8, False, 0.0, 255.0, 1.0)\r\n\r\n def testOp_with8BitsScalingAndNudgingDown(self):\r\n self._TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)\r\n\r\n def testOp_with8BitsScalingAndNudgingUp(self):\r\n self._TestOp(-128.0, -0.5, 8, False, -127.5, 0.0, 0.5)\r\n\r\n def testOp_with8BitsScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 127.4, 8, False, 0.0, 127.5, 0.5)\r\n\r\n # 8 bits, narrow range.\r\n def testOp_with8BitsNarrowRangeNoScalingNoNudging(self):\r\n self._TestOp(0.0, 254.0, 8, True, 0.0, 254.0, 1.0)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingDown(self):\r\n self._TestOp(0.1, 127.1, 8, True, 0.0, 127.0, 0.5)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingUp(self):\r\n self._TestOp(-127.1, -0.1, 8, True, -127.0, 0.0, 0.5)\r\n\r\n def testOp_with8BitsNarrowRangeScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 126.9, 8, True, 0.0, 127.0, 0.5)\r\n\r\n # 7 bits, wide range.\r\n def testOp_with7BitsNoScalingNoNudging(self):\r\n self._TestOp(0.0, 127.0, 7, False, 0.0, 127.0, 1.0)\r\n\r\n def testOp_with7BitsScalingAndNudgingDown(self):\r\n self._TestOp(0.5, 64.0, 7, False, 0.0, 63.5, 0.5)\r\n\r\n def testOp_with7BitsScalingAndNudgingUp(self):\r\n self._TestOp(-64.0, -0.5, 7, False, -63.5, 0.0, 0.5)\r\n\r\n def testOp_with7BitsScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 63.4, 7, False, 0.0, 63.5, 0.5)\r\n\r\n # 7 bits, narrow range.\r\n def 
testOp_with7BitsNarrowRangeNoScalingNoNudging(self):\r\n self._TestOp(0.0, 126.0, 7, True, 0.0, 126.0, 1.0)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingDown(self):\r\n self._TestOp(0.1, 63.1, 7, True, 0.0, 63.0, 0.5)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingUp(self):\r\n self._TestOp(-63.1, -0.1, 7, True, -63.0, 0.0, 0.5)\r\n\r\n def testOp_with7BitsNarrowRangeScalingAndNudgingBetween(self):\r\n self._TestOp(-0.1, 62.9, 7, True, 0.0, 63.0, 0.5)\r\n\r\n def _TestOp(self, input_min, input_max, num_bits, narrow_range,\r\n expected_nudged_input_min, expected_nudged_input_max,\r\n expected_step):\r\n inputs = np.array(\r\n [\r\n expected_nudged_input_min - expected_step,\r\n expected_nudged_input_min - 0.01, expected_nudged_input_min,\r\n expected_nudged_input_min + 0.01,\r\n expected_nudged_input_min + expected_step - 0.01,\r\n expected_nudged_input_min + expected_step,\r\n expected_nudged_input_min + expected_step + 0.01,\r\n expected_nudged_input_max - 0.01, expected_nudged_input_max,\r\n expected_nudged_input_max + 0.01,\r\n expected_nudged_input_max + expected_step\r\n ],\r\n dtype=np.float32)\r\n gradients = np.arange(1, len(inputs) + 1, dtype=np.float32)\r\n expected_backprops_wrt_input = np.array(\r\n [0.0, 0.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, 0.0],\r\n dtype=np.float32)\r\n expected_backprops_wrt_min = 1.0 + 2.0\r\n expected_backprops_wrt_max = 10.0 + 11.0\r\n\r\n with self.session() as session:\r\n with self.test_scope():\r\n gradient_placeholder = array_ops.placeholder(\r\n dtypes.float32, gradients.shape, name=\"gradients\")\r\n input_placeholder = array_ops.placeholder(\r\n dtypes.float32, inputs.shape, name=\"inputs\")\r\n min_placeholder = array_ops.placeholder(dtypes.float32, (), name=\"min\")\r\n max_placeholder = array_ops.placeholder(dtypes.float32, (), name=\"max\")\r\n outputs = array_ops.fake_quant_with_min_max_vars_gradient(\r\n gradient_placeholder,\r\n input_placeholder,\r\n min_placeholder,\r\n max_placeholder,\r\n num_bits=num_bits,\r\n narrow_range=narrow_range)\r\n backprops_wrt_input, backprops_wrt_min, backprops_wrt_max = session.run(\r\n outputs, {\r\n gradient_placeholder: gradients,\r\n input_placeholder: inputs,\r\n min_placeholder: input_min,\r\n max_placeholder: input_max\r\n })\r\n self.assertAllCloseAccordingToType(\r\n backprops_wrt_input,\r\n expected_backprops_wrt_input,\r\n rtol=1e-3,\r\n atol=1e-5,\r\n bfloat16_rtol=0.03)\r\n self.assertAllCloseAccordingToType(\r\n backprops_wrt_min,\r\n expected_backprops_wrt_min,\r\n rtol=1e-3,\r\n atol=1e-5,\r\n bfloat16_rtol=0.03)\r\n self.assertAllCloseAccordingToType(\r\n backprops_wrt_max,\r\n expected_backprops_wrt_max,\r\n rtol=1e-3,\r\n atol=1e-5,\r\n bfloat16_rtol=0.03)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n googletest.main()\r\n", "# Copyright 2017 The TensorFlow Authors. 
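The `expected_nudged_input_min/max` and `expected_step` values in the tests above follow from the fake-quant "nudging" arithmetic: the step is the input range divided by the number of quantization intervals, and the zero point is rounded onto the integer grid (clamped to the quantized range), which shifts the representable min/max. The NumPy sketch below is a reference restatement of that behaviour, not the XLA kernel; the helper name is ours, and the spot-check values come from `_TestOp(0.5, 128.0, 8, False, 0.0, 127.5, 0.5)`.

```python
# Reference NumPy sketch of FakeQuantWithMinMaxArgs nudging + quantization.
import numpy as np

def fake_quant_reference(inputs, input_min, input_max,
                         num_bits=8, narrow_range=False):
    quant_min = 1.0 if narrow_range else 0.0
    quant_max = float(2 ** num_bits - 1)
    step = (input_max - input_min) / (quant_max - quant_min)
    # Nudge the zero point onto an integer grid position inside the range.
    zero_point_from_min = quant_min - input_min / step
    nudged_zero_point = np.clip(np.round(zero_point_from_min),
                                quant_min, quant_max)
    nudged_min = (quant_min - nudged_zero_point) * step
    nudged_max = (quant_max - nudged_zero_point) * step
    clamped = np.clip(inputs, nudged_min, nudged_max)
    return np.round((clamped - nudged_min) / step) * step + nudged_min

# [0.5, 128.0] with 8 bits and a wide range nudges to [0.0, 127.5], step 0.5.
x = np.array([-0.3, 0.26, 64.0, 127.6, 200.0], dtype=np.float32)
out = fake_quant_reference(x, input_min=0.5, input_max=128.0)
np.testing.assert_allclose(out, [0.0, 0.5, 64.0, 127.5, 127.5], atol=1e-6)
```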
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ======================================\r\n\"\"\"Defines the `Topology` class, that describes a TPU fabric topology.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom six.moves import xrange # pylint: disable=redefined-builtin\r\n\r\nfrom tensorflow.core.protobuf.tpu import topology_pb2\r\n\r\n\r\ndef _tpu_device_name(job, task, device):\r\n \"\"\"Returns the device name for the TPU `device` on `task` of `job`.\"\"\"\r\n if job is None:\r\n return \"/task:%d/device:TPU:%d\" % (task, device)\r\n else:\r\n return \"/job:%s/task:%d/device:TPU:%d\" % (job, task, device)\r\n\r\n\r\ndef _tpu_host_device_name(job, task):\r\n \"\"\"Returns the device name for the CPU device on `task` of `job`.\"\"\"\r\n if job is None:\r\n return \"/task:%d/device:CPU:0\" % task\r\n else:\r\n return \"/job:%s/task:%d/device:CPU:0\" % (job, task)\r\n\r\n\r\nclass Topology(object):\r\n \"\"\"Describes a set of TPU devices.\r\n\r\n Represents both the shape of the physical mesh, and the mapping between\r\n TensorFlow TPU devices to physical mesh coordinates.\r\n \"\"\"\r\n\r\n def __init__(self, serialized=None, mesh_shape=None, device_coordinates=None):\r\n \"\"\"Builds a Topology object.\r\n\r\n If `serialized` is not `None`, the topology is parsed from `serialized` and\r\n the other arguments are ignored. Otherwise, the topology is computed from\r\n `mesh_shape` and `device_coordinates`.\r\n\r\n Args:\r\n serialized: A serialized `TopologyProto`, or `None`. If not `None`, the\r\n serialized proto is parsed to discover the topology.\r\n mesh_shape: A sequence of 3 positive integers, or `None`. If not `None`,\r\n the shape of the TPU topology, in number of cores. Ignored if\r\n `serialized` is not `None`.\r\n device_coordinates: A rank 3 numpy array that describes the mapping from\r\n TensorFlow TPU devices to TPU fabric coordinates, or `None`. 
Ignored\r\n if `serialized is not `None`.\r\n\r\n Raises:\r\n ValueError: If `serialized` does not describe a well-formed topology.\r\n ValueError: If `serialized` is `None` and `mesh_shape` is not a sequence\r\n of 3 positive integers.\r\n ValueError: If `serialized` is `None` and `device_coordinates` is not a\r\n rank 3 numpy int32 array that describes a valid coordinate mapping.\r\n \"\"\"\r\n\r\n self._serialized = serialized\r\n\r\n if serialized:\r\n self._parse_topology(serialized)\r\n else:\r\n self._mesh_shape = np.asarray(mesh_shape, dtype=np.int32)\r\n self._device_coordinates = np.asarray(device_coordinates, np.int32)\r\n if len(self._mesh_shape) != 3 or any(self._mesh_shape < 1):\r\n raise ValueError(\"`mesh_shape` must be a sequence of 3 positive \"\r\n \"entries; got {}\".format(self._mesh_shape))\r\n\r\n if (len(self._device_coordinates.shape) != 3 or\r\n self._device_coordinates.shape[2] != len(self._mesh_shape)):\r\n raise ValueError(\"`device_coordinates` must be a rank 3 int32 array \"\r\n \"with minor dimension equal to the mesh shape rank\")\r\n\r\n self._topology_tasks, self._topology_devices = self._invert_topology()\r\n\r\n def _parse_topology(self, serialized):\r\n \"\"\"Parses a serialized `TopologyProto` into `self`.\"\"\"\r\n proto = topology_pb2.TopologyProto()\r\n proto.ParseFromString(serialized)\r\n\r\n self._mesh_shape = np.array(proto.mesh_shape, dtype=np.int32)\r\n if len(self._mesh_shape) != 3 or any(self._mesh_shape < 1):\r\n raise ValueError(\"`mesh_shape` must be a vector of size 3 with positive \"\r\n \"entries; got {}\".format(self._mesh_shape))\r\n\r\n if proto.num_tasks < 0:\r\n raise ValueError(\"`num_tasks` must be >= 0; got {}\".format(\r\n proto.num_tasks))\r\n if proto.num_tpu_devices_per_task < 0:\r\n raise ValueError(\"`num_tpu_devices_per_task` must be >= 0; got {}\".format(\r\n proto.num_tpu_devices_per_task))\r\n\r\n expected_coordinates_size = (\r\n proto.num_tasks * proto.num_tpu_devices_per_task * len(\r\n proto.mesh_shape))\r\n if len(proto.device_coordinates) != expected_coordinates_size:\r\n raise ValueError(\"`device_coordinates` must have shape num_tasks ({}) * \"\r\n \"num_tpu_devices_per_task ({}) * len(mesh_shape) ({}); \"\r\n \"got shape {}\".format(proto.num_tasks,\r\n proto.num_tpu_devices_per_task,\r\n proto.mesh_shape,\r\n len(proto.device_coordinates)))\r\n\r\n coords = np.array(proto.device_coordinates, dtype=np.int32)\r\n if any(coords < 0):\r\n raise ValueError(\"`device_coordinates` must be >= 0\")\r\n coords = coords.reshape((proto.num_tasks, proto.num_tpu_devices_per_task,\r\n len(proto.mesh_shape)))\r\n self._device_coordinates = coords\r\n\r\n def _invert_topology(self):\r\n \"\"\"Inverts a [task,device,axis] topology to [x,y,z] -> task/device maps.\"\"\"\r\n tasks = np.full(list(self.mesh_shape), -1, dtype=np.int32)\r\n devices = np.full(list(self.mesh_shape), -1, dtype=np.int32)\r\n for task in xrange(self.device_coordinates.shape[0]):\r\n for device in xrange(self.device_coordinates.shape[1]):\r\n x, y, z = self.device_coordinates[task, device, :]\r\n tasks[x, y, z] = task\r\n devices[x, y, z] = device\r\n return tasks, devices\r\n\r\n @property\r\n def mesh_shape(self):\r\n \"\"\"A rank 1 int32 array describing the shape of the TPU topology.\"\"\"\r\n return self._mesh_shape\r\n\r\n @property\r\n def mesh_rank(self):\r\n \"\"\"Returns the number of dimensions in the mesh.\"\"\"\r\n return len(self._mesh_shape)\r\n\r\n @property\r\n def device_coordinates(self):\r\n \"\"\"Describes the mapping from TPU 
devices to topology coordinates.\r\n\r\n Returns:\r\n A rank 3 int32 array with shape `[tasks, devices, axis]`.\r\n `tasks` is the number of tasks in the TPU cluster, `devices` is the number\r\n of TPU devices per task, and `axis` is the number of axes in the TPU\r\n cluster topology. Each entry gives the `axis`-th coordinate in the\r\n topology of a task/device pair. TPU topologies are 3-dimensional, with\r\n dimensions `(x, y, core number)`.\r\n \"\"\"\r\n return self._device_coordinates\r\n\r\n def task_ordinal_at_coordinates(self, device_coordinates):\r\n \"\"\"Returns the TensorFlow task number attached to `device_coordinates`.\r\n\r\n Args:\r\n device_coordinates: An integer sequence describing a device's physical\r\n coordinates in the TPU fabric.\r\n\r\n Returns:\r\n Returns the TensorFlow task number that contains the TPU device with those\r\n physical coordinates.\r\n \"\"\"\r\n return self._topology_tasks[tuple(device_coordinates)]\r\n\r\n def tpu_device_ordinal_at_coordinates(self, device_coordinates):\r\n \"\"\"Returns the TensorFlow device number at `device_coordinates`.\r\n\r\n Args:\r\n device_coordinates: An integer sequence describing a device's physical\r\n coordinates in the TPU fabric.\r\n\r\n Returns:\r\n Returns the TensorFlow device number within the task corresponding to\r\n attached to the device with those physical coordinates.\r\n \"\"\"\r\n return self._topology_devices[tuple(device_coordinates)]\r\n\r\n def cpu_device_name_at_coordinates(self, device_coordinates, job=None):\r\n \"\"\"Returns the CPU device attached to a logical core.\"\"\"\r\n return _tpu_host_device_name(\r\n job, self._topology_tasks[tuple(device_coordinates)])\r\n\r\n def tpu_device_name_at_coordinates(self, device_coordinates, job=None):\r\n \"\"\"Returns the name of the TPU device assigned to a logical core.\"\"\"\r\n return _tpu_device_name(job,\r\n self._topology_tasks[tuple(device_coordinates)],\r\n self._topology_devices[tuple(device_coordinates)])\r\n\r\n @property\r\n def num_tasks(self):\r\n \"\"\"Returns the number of TensorFlow tasks in the TPU slice.\"\"\"\r\n return self._device_coordinates.shape[0]\r\n\r\n @property\r\n def num_tpus_per_task(self):\r\n \"\"\"Returns the number of TPU devices per task in the TPU slice.\"\"\"\r\n return self._device_coordinates.shape[1]\r\n\r\n def serialized(self):\r\n \"\"\"Returns the serialized form of the topology.\"\"\"\r\n if self._serialized is None:\r\n proto = topology_pb2.TopologyProto()\r\n proto.mesh_shape[:] = list(self._mesh_shape)\r\n proto.num_tasks = self._device_coordinates.shape[0]\r\n proto.num_tpu_devices_per_task = self._device_coordinates.shape[1]\r\n proto.device_coordinates.extend(list(self._device_coordinates.flatten()))\r\n self._serialized = proto.SerializeToString()\r\n\r\n return self._serialized\r\n", "# Copyright 2016 The TensorFlow Authors. 
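The core of `_invert_topology` above is turning the `[task, device, axis]` coordinate array into lookup tables indexed by physical position, so that fabric coordinates can be mapped back to a TensorFlow task and device ordinal. The snippet below is a small NumPy sketch of that inversion; the 2x1x2 mesh and the coordinate values are made-up illustrative inputs, not real fabric data.

```python
# Sketch: invert a [task, device, axis] coordinate array into per-coordinate
# task/device lookup tables, as Topology._invert_topology does.
import numpy as np

mesh_shape = np.array([2, 1, 2], dtype=np.int32)
# One task with four TPU devices, each at a distinct (x, y, core) position.
device_coordinates = np.array(
    [[[0, 0, 0], [0, 0, 1], [1, 0, 0], [1, 0, 1]]], dtype=np.int32)

tasks = np.full(list(mesh_shape), -1, dtype=np.int32)
devices = np.full(list(mesh_shape), -1, dtype=np.int32)
for task in range(device_coordinates.shape[0]):
    for device in range(device_coordinates.shape[1]):
        x, y, z = device_coordinates[task, device, :]
        tasks[x, y, z] = task
        devices[x, y, z] = device

# The device at fabric coordinate (1, 0, 1) is device 3 of task 0, i.e.
# "/task:0/device:TPU:3" under the naming helpers defined above.
assert tasks[1, 0, 1] == 0 and devices[1, 0, 1] == 3
```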
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Some common SessionRunHook classes.\r\n\r\nNote that the symbols that are exported to v1 tf.train namespace are also\r\nexported to v2 in tf.estimator namespace. See\r\nhttps://github.com/uve/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport time\r\n\r\nimport numpy as np\r\nimport six\r\n\r\nfrom tensorflow.core.framework.summary_pb2 import Summary\r\nfrom tensorflow.core.protobuf import config_pb2\r\nfrom tensorflow.core.util.event_pb2 import SessionLog\r\nfrom tensorflow.python.client import timeline\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import meta_graph\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.platform import gfile\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.training import session_run_hook\r\nfrom tensorflow.python.training import training_util\r\nfrom tensorflow.python.training.session_run_hook import SessionRunArgs\r\nfrom tensorflow.python.training.summary_io import SummaryWriterCache\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n_HOOKS = \"hooks\"\r\n_STEPS_PER_RUN_VAR = \"steps_per_run\"\r\n\r\n\r\nclass _HookTimer(object):\r\n \"\"\"Base timer for determining when Hooks should trigger.\r\n\r\n Should not be instantiated directly.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def reset(self):\r\n \"\"\"Resets the timer.\"\"\"\r\n pass\r\n\r\n def should_trigger_for_step(self, step):\r\n \"\"\"Return true if the timer should trigger for the specified step.\"\"\"\r\n raise NotImplementedError\r\n\r\n def update_last_triggered_step(self, step):\r\n \"\"\"Update the last triggered time and step number.\r\n\r\n Args:\r\n step: The current step.\r\n\r\n Returns:\r\n A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number\r\n of seconds between the current trigger and the last one (a float), and\r\n `elapsed_steps` is the number of steps between the current trigger and\r\n the last one. Both values will be set to `None` on the first trigger.\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n def last_triggered_step(self):\r\n \"\"\"Returns the last triggered time step or None if never triggered.\"\"\"\r\n raise NotImplementedError\r\n\r\n\r\n@tf_export(v1=[\"train.SecondOrStepTimer\"])\r\nclass SecondOrStepTimer(_HookTimer):\r\n \"\"\"Timer that triggers at most once every N seconds or once every N steps.\r\n\r\n This symbol is also exported to v2 in tf.estimator namespace. 
See\r\n https://github.com/uve/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py\r\n \"\"\"\r\n\r\n def __init__(self, every_secs=None, every_steps=None):\r\n self.reset()\r\n self._every_secs = every_secs\r\n self._every_steps = every_steps\r\n\r\n if self._every_secs is None and self._every_steps is None:\r\n raise ValueError(\"Either every_secs or every_steps should be provided.\")\r\n if (self._every_secs is not None) and (self._every_steps is not None):\r\n raise ValueError(\"Can not provide both every_secs and every_steps.\")\r\n\r\n super(SecondOrStepTimer, self).__init__()\r\n\r\n def reset(self):\r\n self._last_triggered_step = None\r\n self._last_triggered_time = None\r\n\r\n def should_trigger_for_step(self, step):\r\n \"\"\"Return true if the timer should trigger for the specified step.\r\n\r\n Args:\r\n step: Training step to trigger on.\r\n\r\n Returns:\r\n True if the difference between the current time and the time of the last\r\n trigger exceeds `every_secs`, or if the difference between the current\r\n step and the last triggered step exceeds `every_steps`. False otherwise.\r\n \"\"\"\r\n if self._last_triggered_step is None:\r\n return True\r\n\r\n if self._last_triggered_step == step:\r\n return False\r\n\r\n if self._every_secs is not None:\r\n if time.time() >= self._last_triggered_time + self._every_secs:\r\n return True\r\n\r\n if self._every_steps is not None:\r\n if step >= self._last_triggered_step + self._every_steps:\r\n return True\r\n\r\n return False\r\n\r\n def update_last_triggered_step(self, step):\r\n current_time = time.time()\r\n if self._last_triggered_time is None:\r\n elapsed_secs = None\r\n elapsed_steps = None\r\n else:\r\n elapsed_secs = current_time - self._last_triggered_time\r\n elapsed_steps = step - self._last_triggered_step\r\n\r\n self._last_triggered_time = current_time\r\n self._last_triggered_step = step\r\n return (elapsed_secs, elapsed_steps)\r\n\r\n def last_triggered_step(self):\r\n return self._last_triggered_step\r\n\r\n\r\nclass NeverTriggerTimer(_HookTimer):\r\n \"\"\"Timer that never triggers.\"\"\"\r\n\r\n def should_trigger_for_step(self, step):\r\n _ = step\r\n return False\r\n\r\n def update_last_triggered_step(self, step):\r\n _ = step\r\n return (None, None)\r\n\r\n def last_triggered_step(self):\r\n return None\r\n\r\n\r\n@tf_export(v1=[\"train.LoggingTensorHook\"])\r\nclass LoggingTensorHook(session_run_hook.SessionRunHook):\r\n \"\"\"Prints the given tensors every N local steps, every N seconds, or at end.\r\n\r\n The tensors will be printed to the log, with `INFO` severity. If you are not\r\n seeing the logs, you might want to add the following line after your imports:\r\n\r\n ```python\r\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\r\n ```\r\n\r\n Note that if `at_end` is True, `tensors` should not include any tensor\r\n whose evaluation produces a side effect such as consuming additional inputs.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n tensors,\r\n every_n_iter=None,\r\n every_n_secs=None,\r\n at_end=False,\r\n formatter=None):\r\n \"\"\"Initializes a `LoggingTensorHook`.\r\n\r\n Args:\r\n tensors: `dict` that maps string-valued tags to tensors/tensor names, or\r\n `iterable` of tensors/tensor names.\r\n every_n_iter: `int`, print the values of `tensors` once every N local\r\n steps taken on the current worker.\r\n every_n_secs: `int` or `float`, print the values of `tensors` once every N\r\n seconds. 
Exactly one of `every_n_iter` and `every_n_secs` should be\r\n provided.\r\n at_end: `bool` specifying whether to print the values of `tensors` at the\r\n end of the run.\r\n formatter: function, takes dict of `tag`->`Tensor` and returns a string.\r\n If `None` uses default printing all tensors.\r\n\r\n Raises:\r\n ValueError: if `every_n_iter` is non-positive.\r\n \"\"\"\r\n only_log_at_end = (\r\n at_end and (every_n_iter is None) and (every_n_secs is None))\r\n if (not only_log_at_end and\r\n (every_n_iter is None) == (every_n_secs is None)):\r\n raise ValueError(\r\n \"either at_end and/or exactly one of every_n_iter and every_n_secs \"\r\n \"must be provided.\")\r\n if every_n_iter is not None and every_n_iter <= 0:\r\n raise ValueError(\"invalid every_n_iter=%s.\" % every_n_iter)\r\n if not isinstance(tensors, dict):\r\n self._tag_order = tensors\r\n tensors = {item: item for item in tensors}\r\n else:\r\n self._tag_order = sorted(tensors.keys())\r\n self._tensors = tensors\r\n self._formatter = formatter\r\n self._timer = (\r\n NeverTriggerTimer() if only_log_at_end else SecondOrStepTimer(\r\n every_secs=every_n_secs, every_steps=every_n_iter))\r\n self._log_at_end = at_end\r\n\r\n def begin(self):\r\n self._timer.reset()\r\n self._iter_count = 0\r\n # Convert names to tensors if given\r\n self._current_tensors = {\r\n tag: _as_graph_element(tensor)\r\n for (tag, tensor) in self._tensors.items()\r\n }\r\n\r\n def before_run(self, run_context): # pylint: disable=unused-argument\r\n self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)\r\n if self._should_trigger:\r\n return SessionRunArgs(self._current_tensors)\r\n else:\r\n return None\r\n\r\n def _log_tensors(self, tensor_values):\r\n original = np.get_printoptions()\r\n np.set_printoptions(suppress=True)\r\n elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count)\r\n if self._formatter:\r\n logging.info(self._formatter(tensor_values))\r\n else:\r\n stats = []\r\n for tag in self._tag_order:\r\n stats.append(\"%s = %s\" % (tag, tensor_values[tag]))\r\n if elapsed_secs is not None:\r\n logging.info(\"%s (%.3f sec)\", \", \".join(stats), elapsed_secs)\r\n else:\r\n logging.info(\"%s\", \", \".join(stats))\r\n np.set_printoptions(**original)\r\n\r\n def after_run(self, run_context, run_values):\r\n _ = run_context\r\n if self._should_trigger:\r\n self._log_tensors(run_values.results)\r\n\r\n self._iter_count += 1\r\n\r\n def end(self, session):\r\n if self._log_at_end:\r\n values = session.run(self._current_tensors)\r\n self._log_tensors(values)\r\n\r\n\r\ndef get_or_create_steps_per_run_variable():\r\n \"\"\"Gets or creates the steps_per_run variable.\r\n\r\n In Estimator, the user provided computation, the model_fn, is wrapped\r\n inside a tf.while_loop for peak performance. The iterations of the loop are\r\n specified by this variable, which adjusts its value on the CPU after each\r\n device program execution and before the next execution.\r\n\r\n The purpose of using a variable, rather than a constant, is to allow\r\n Estimator adapt the device training iterations according to the final steps\r\n specified by users. 
For example, if the user sets the steps_per_run as\r\n 4 and steps as 10 in Estimator.train(), the steps_per_run\r\n variable will have the following value before each training run.\r\n\r\n - 1-st execution: steps_per_run = 4\r\n - 2-nd execution: steps_per_run = 4\r\n - 3-rd execution: steps_per_run = 2\r\n\r\n As model_fn increases the global step once per train_op invocation, the global\r\n step is 10 after all executions, matching the steps=10 inputs passed in by\r\n users.\r\n\r\n Returns:\r\n A TF non-trainable resource variable.\r\n\r\n Raises:\r\n RuntimeError: If multi steps_per_run variables were found.\r\n \"\"\"\r\n graph = ops.get_default_graph()\r\n collection_name = \"{}_{}\".format(_HOOKS, _STEPS_PER_RUN_VAR)\r\n steps_per_run_vars = graph.get_collection(collection_name)\r\n if len(steps_per_run_vars) == 1:\r\n return steps_per_run_vars[0]\r\n elif len(steps_per_run_vars) > 1:\r\n raise RuntimeError(\"Multiple steps_per_run_var in collection.\")\r\n\r\n with variable_scope.variable_scope(_HOOKS, reuse=variable_scope.AUTO_REUSE):\r\n return variable_scope.get_variable(\r\n _STEPS_PER_RUN_VAR,\r\n initializer=init_ops.ones_initializer(),\r\n shape=[],\r\n dtype=dtypes.int32,\r\n trainable=False,\r\n collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],\r\n use_resource=True)\r\n\r\n\r\nclass _MultiStepStopAtStepHook(session_run_hook.SessionRunHook):\r\n \"\"\"Hook that requests stop at a specified step.\"\"\"\r\n\r\n def __init__(self, num_steps=None, last_step=None, steps_per_run=1):\r\n \"\"\"Initializes a `MultiStepStopAtStepHook`.\r\n\r\n This hook requests stop after either a number of steps have been\r\n executed or a last step has been reached. Only one of the two options can be\r\n specified.\r\n\r\n if `num_steps` is specified, it indicates the number of steps to execute\r\n after `begin()` is called. If instead `last_step` is specified, it\r\n indicates the last step we want to execute, as passed to the `after_run()`\r\n call.\r\n\r\n In Estimator, the user provided computation, the model_fn, is wrapped\r\n inside a tf.while_loop for peak performance. 
The steps_per_run variable\r\n determines the number of iterations of the loop before returning to the CPU.\r\n\r\n Args:\r\n num_steps: Number of steps to execute.\r\n last_step: Step after which to stop.\r\n steps_per_run: Number of steps executed per run call.\r\n\r\n Raises:\r\n ValueError: If one of the arguments is invalid.\r\n \"\"\"\r\n if num_steps is None and last_step is None:\r\n raise ValueError(\"One of num_steps or last_step must be specified.\")\r\n if num_steps is not None and last_step is not None:\r\n raise ValueError(\"Only one of num_steps or last_step can be specified.\")\r\n if steps_per_run is None or steps_per_run < 1:\r\n raise ValueError(\"steps_per_run should be greater than 0\")\r\n self._num_steps = num_steps\r\n self._last_step = last_step\r\n self._steps_per_run_initial_value = steps_per_run\r\n\r\n def begin(self):\r\n self._global_step_tensor = training_util.get_global_step()\r\n if self._global_step_tensor is None:\r\n raise RuntimeError(\"Global step should be created to use StopAtStepHook.\")\r\n self._steps_per_run_variable = get_or_create_steps_per_run_variable()\r\n\r\n def _update_steps_per_run_variable(self, global_step, session):\r\n steps = min(self._last_step - global_step,\r\n self._steps_per_run_initial_value)\r\n self._steps_per_run_variable.load(steps, session=session)\r\n\r\n def after_create_session(self, session, coord):\r\n global_step = session.run(self._global_step_tensor)\r\n if self._last_step is None:\r\n self._last_step = global_step + self._num_steps\r\n self._update_steps_per_run_variable(global_step, session)\r\n\r\n def after_run(self, run_context, run_values):\r\n # Global step cannot be retrieved via SessionRunArgs and before_run due to\r\n # race condition in hook execution.\r\n global_step = run_context.session.run(self._global_step_tensor)\r\n if global_step >= self._last_step:\r\n run_context.request_stop()\r\n else:\r\n self._update_steps_per_run_variable(global_step, run_context.session)\r\n\r\n\r\n@tf_export(v1=[\"train.StopAtStepHook\"])\r\nclass StopAtStepHook(session_run_hook.SessionRunHook):\r\n \"\"\"Hook that requests stop at a specified step.\"\"\"\r\n\r\n def __init__(self, num_steps=None, last_step=None):\r\n \"\"\"Initializes a `StopAtStepHook`.\r\n\r\n This hook requests stop after either a number of steps have been\r\n executed or a last step has been reached. Only one of the two options can be\r\n specified.\r\n\r\n if `num_steps` is specified, it indicates the number of steps to execute\r\n after `begin()` is called. 
If instead `last_step` is specified, it\r\n indicates the last step we want to execute, as passed to the `after_run()`\r\n call.\r\n\r\n Args:\r\n num_steps: Number of steps to execute.\r\n last_step: Step after which to stop.\r\n\r\n Raises:\r\n ValueError: If one of the arguments is invalid.\r\n \"\"\"\r\n if num_steps is None and last_step is None:\r\n raise ValueError(\"One of num_steps or last_step must be specified.\")\r\n if num_steps is not None and last_step is not None:\r\n raise ValueError(\"Only one of num_steps or last_step can be specified.\")\r\n self._num_steps = num_steps\r\n self._last_step = last_step\r\n\r\n def begin(self):\r\n self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access\r\n if self._global_step_tensor is None:\r\n raise RuntimeError(\"Global step should be created to use StopAtStepHook.\")\r\n\r\n def after_create_session(self, session, coord):\r\n if self._last_step is None:\r\n global_step = session.run(self._global_step_tensor)\r\n self._last_step = global_step + self._num_steps\r\n\r\n def before_run(self, run_context): # pylint: disable=unused-argument\r\n return SessionRunArgs(self._global_step_tensor)\r\n\r\n def after_run(self, run_context, run_values):\r\n global_step = run_values.results + 1\r\n if global_step >= self._last_step:\r\n # Check latest global step to ensure that the targeted last step is\r\n # reached. global_step read tensor is the value of global step\r\n # before running the operation. We're not sure whether current session.run\r\n # incremented the global_step or not. Here we're checking it.\r\n\r\n step = run_context.session.run(self._global_step_tensor)\r\n if step >= self._last_step:\r\n run_context.request_stop()\r\n\r\n\r\n@tf_export(v1=[\"train.CheckpointSaverListener\"])\r\nclass CheckpointSaverListener(object):\r\n \"\"\"Interface for listeners that take action before or after checkpoint save.\r\n\r\n `CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is\r\n triggered, and provides callbacks at the following points:\r\n - before using the session\r\n - before each call to `Saver.save()`\r\n - after each call to `Saver.save()`\r\n - at the end of session\r\n\r\n To use a listener, implement a class and pass the listener to a\r\n `CheckpointSaverHook`, as in this example:\r\n\r\n ```python\r\n class ExampleCheckpointSaverListener(CheckpointSaverListener):\r\n def begin(self):\r\n # You can add ops to the graph here.\r\n print('Starting the session.')\r\n self.your_tensor = ...\r\n\r\n def before_save(self, session, global_step_value):\r\n print('About to write a checkpoint')\r\n\r\n def after_save(self, session, global_step_value):\r\n print('Done writing checkpoint.')\r\n if decided_to_stop_training():\r\n return True\r\n\r\n def end(self, session, global_step_value):\r\n print('Done with the session.')\r\n\r\n ...\r\n listener = ExampleCheckpointSaverListener()\r\n saver_hook = tf.estimator.CheckpointSaverHook(\r\n checkpoint_dir, listeners=[listener])\r\n with\r\n tf.compat.v1.train.MonitoredTrainingSession(chief_only_hooks=[saver_hook]):\r\n ...\r\n ```\r\n\r\n A `CheckpointSaverListener` may simply take some action after every\r\n checkpoint save. It is also possible for the listener to use its own schedule\r\n to act less frequently, e.g. based on global_step_value. In this case,\r\n implementors should implement the `end()` method to handle actions related to\r\n the last checkpoint save. 
But the listener should not act twice if\r\n `after_save()` already handled this last checkpoint save.\r\n\r\n A `CheckpointSaverListener` can request training to be stopped, by returning\r\n True in `after_save`. Please note that, in replicated distributed training\r\n setting, only `chief` should use this behavior. Otherwise each worker will do\r\n their own evaluation, which may be wasteful of resources.\r\n \"\"\"\r\n\r\n def begin(self):\r\n pass\r\n\r\n def before_save(self, session, global_step_value):\r\n pass\r\n\r\n def after_save(self, session, global_step_value):\r\n pass\r\n\r\n def end(self, session, global_step_value):\r\n pass\r\n\r\n\r\n@tf_export(v1=[\"train.CheckpointSaverHook\"])\r\nclass CheckpointSaverHook(session_run_hook.SessionRunHook):\r\n \"\"\"Saves checkpoints every N steps or seconds.\"\"\"\r\n\r\n def __init__(self,\r\n checkpoint_dir,\r\n save_secs=None,\r\n save_steps=None,\r\n saver=None,\r\n checkpoint_basename=\"model.ckpt\",\r\n scaffold=None,\r\n listeners=None):\r\n \"\"\"Initializes a `CheckpointSaverHook`.\r\n\r\n Args:\r\n checkpoint_dir: `str`, base directory for the checkpoint files.\r\n save_secs: `int`, save every N secs.\r\n save_steps: `int`, save every N steps.\r\n saver: `Saver` object, used for saving.\r\n checkpoint_basename: `str`, base name for the checkpoint files.\r\n scaffold: `Scaffold`, use to get saver object.\r\n listeners: List of `CheckpointSaverListener` subclass instances. Used for\r\n callbacks that run immediately before or after this hook saves the\r\n checkpoint.\r\n\r\n Raises:\r\n ValueError: One of `save_steps` or `save_secs` should be set.\r\n ValueError: At most one of `saver` or `scaffold` should be set.\r\n \"\"\"\r\n logging.info(\"Create CheckpointSaverHook.\")\r\n if saver is not None and scaffold is not None:\r\n raise ValueError(\"You cannot provide both saver and scaffold.\")\r\n self._saver = saver\r\n self._checkpoint_dir = checkpoint_dir\r\n self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)\r\n self._scaffold = scaffold\r\n self._timer = SecondOrStepTimer(\r\n every_secs=save_secs, every_steps=save_steps)\r\n self._listeners = listeners or []\r\n self._steps_per_run = 1\r\n\r\n def _set_steps_per_run(self, steps_per_run):\r\n self._steps_per_run = steps_per_run\r\n\r\n def begin(self):\r\n self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)\r\n self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access\r\n if self._global_step_tensor is None:\r\n raise RuntimeError(\r\n \"Global step should be created to use CheckpointSaverHook.\")\r\n for l in self._listeners:\r\n l.begin()\r\n\r\n def after_create_session(self, session, coord):\r\n global_step = session.run(self._global_step_tensor)\r\n # We do write graph and saver_def at the first call of before_run.\r\n # We cannot do this in begin, since we let other hooks to change graph and\r\n # add variables in begin. 
Graph is finalized after all begin calls.\r\n training_util.write_graph(\r\n ops.get_default_graph().as_graph_def(add_shapes=True),\r\n self._checkpoint_dir, \"graph.pbtxt\")\r\n saver_def = self._get_saver().saver_def if self._get_saver() else None\r\n graph = ops.get_default_graph()\r\n meta_graph_def = meta_graph.create_meta_graph_def(\r\n graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)\r\n self._summary_writer.add_graph(graph)\r\n self._summary_writer.add_meta_graph(meta_graph_def)\r\n # The checkpoint saved here is the state at step \"global_step\".\r\n self._save(session, global_step)\r\n self._timer.update_last_triggered_step(global_step)\r\n\r\n def before_run(self, run_context): # pylint: disable=unused-argument\r\n return SessionRunArgs(self._global_step_tensor)\r\n\r\n def after_run(self, run_context, run_values):\r\n stale_global_step = run_values.results\r\n if self._timer.should_trigger_for_step(stale_global_step +\r\n self._steps_per_run):\r\n # get the real value after train op.\r\n global_step = run_context.session.run(self._global_step_tensor)\r\n if self._timer.should_trigger_for_step(global_step):\r\n self._timer.update_last_triggered_step(global_step)\r\n if self._save(run_context.session, global_step):\r\n run_context.request_stop()\r\n\r\n def end(self, session):\r\n last_step = session.run(self._global_step_tensor)\r\n if last_step != self._timer.last_triggered_step():\r\n self._save(session, last_step)\r\n for l in self._listeners:\r\n l.end(session, last_step)\r\n\r\n def _save(self, session, step):\r\n \"\"\"Saves the latest checkpoint, returns should_stop.\"\"\"\r\n logging.info(\"Saving checkpoints for %d into %s.\", step, self._save_path)\r\n\r\n for l in self._listeners:\r\n l.before_save(session, step)\r\n\r\n self._get_saver().save(session, self._save_path, global_step=step)\r\n self._summary_writer.add_session_log(\r\n SessionLog(\r\n status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),\r\n step)\r\n\r\n should_stop = False\r\n for l in self._listeners:\r\n if l.after_save(session, step):\r\n logging.info(\r\n \"A CheckpointSaverListener requested that training be stopped. \"\r\n \"listener: {}\".format(l))\r\n should_stop = True\r\n return should_stop\r\n\r\n def _get_saver(self):\r\n if self._saver is not None:\r\n return self._saver\r\n elif self._scaffold is not None:\r\n return self._scaffold.saver\r\n\r\n # Get saver from the SAVERS collection if present.\r\n collection_key = ops.GraphKeys.SAVERS\r\n savers = ops.get_collection(collection_key)\r\n if not savers:\r\n raise RuntimeError(\r\n \"No items in collection {}. Please add a saver to the collection \"\r\n \"or provide a saver or scaffold.\".format(collection_key))\r\n elif len(savers) > 1:\r\n raise RuntimeError(\r\n \"More than one item in collection {}. 
\"\r\n \"Please indicate which one to use by passing it to the constructor.\"\r\n .format(collection_key))\r\n\r\n self._saver = savers[0]\r\n return savers[0]\r\n\r\n\r\n@tf_export(v1=[\"train.StepCounterHook\"])\r\nclass StepCounterHook(session_run_hook.SessionRunHook):\r\n \"\"\"Hook that counts steps per second.\"\"\"\r\n\r\n def __init__(self,\r\n every_n_steps=100,\r\n every_n_secs=None,\r\n output_dir=None,\r\n summary_writer=None):\r\n\r\n if (every_n_steps is None) == (every_n_secs is None):\r\n raise ValueError(\r\n \"exactly one of every_n_steps and every_n_secs should be provided.\")\r\n self._timer = SecondOrStepTimer(\r\n every_steps=every_n_steps, every_secs=every_n_secs)\r\n\r\n self._summary_writer = summary_writer\r\n self._output_dir = output_dir\r\n self._last_global_step = None\r\n self._steps_per_run = 1\r\n\r\n def _set_steps_per_run(self, steps_per_run):\r\n self._steps_per_run = steps_per_run\r\n\r\n def begin(self):\r\n if self._summary_writer is None and self._output_dir:\r\n self._summary_writer = SummaryWriterCache.get(self._output_dir)\r\n self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access\r\n if self._global_step_tensor is None:\r\n raise RuntimeError(\r\n \"Global step should be created to use StepCounterHook.\")\r\n self._summary_tag = training_util.get_global_step().op.name + \"/sec\"\r\n\r\n def before_run(self, run_context): # pylint: disable=unused-argument\r\n return SessionRunArgs(self._global_step_tensor)\r\n\r\n def _log_and_record(self, elapsed_steps, elapsed_time, global_step):\r\n steps_per_sec = elapsed_steps / elapsed_time\r\n if self._summary_writer is not None:\r\n summary = Summary(value=[\r\n Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)\r\n ])\r\n self._summary_writer.add_summary(summary, global_step)\r\n logging.info(\"%s: %g\", self._summary_tag, steps_per_sec)\r\n\r\n def after_run(self, run_context, run_values):\r\n _ = run_context\r\n\r\n stale_global_step = run_values.results\r\n if self._timer.should_trigger_for_step(stale_global_step +\r\n self._steps_per_run):\r\n # get the real value after train op.\r\n global_step = run_context.session.run(self._global_step_tensor)\r\n if self._timer.should_trigger_for_step(global_step):\r\n elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(\r\n global_step)\r\n if elapsed_time is not None:\r\n self._log_and_record(elapsed_steps, elapsed_time, global_step)\r\n\r\n # Check whether the global step has been increased. Here, we do not use the\r\n # timer.last_triggered_step as the timer might record a different global\r\n # step value such that the comparison could be unreliable. For simplicity,\r\n # we just compare the stale_global_step with previously recorded version.\r\n if stale_global_step == self._last_global_step:\r\n # Here, we give a warning in the first 5 times if we have observed that\r\n # the global step has not been increased. For some Optimizers, the global\r\n # step is not increased each time by design. For example,\r\n # SyncReplicaOptimizer doesn't increase the global step in worker's main\r\n # train step.\r\n logging.log_first_n(\r\n logging.WARN,\r\n \"It seems that global step (tf.train.get_global_step) has not \"\r\n \"been increased. Current value (could be stable): %s vs previous \"\r\n \"value: %s. 
You could increase the global step by passing \"\r\n \"tf.train.get_global_step() to Optimizer.apply_gradients or \"\r\n \"Optimizer.minimize.\", 5, stale_global_step, self._last_global_step)\r\n\r\n self._last_global_step = stale_global_step\r\n\r\n\r\n@tf_export(v1=[\"train.NanLossDuringTrainingError\"])\r\nclass NanLossDuringTrainingError(RuntimeError):\r\n\r\n def __str__(self):\r\n return \"NaN loss during training.\"\r\n\r\n\r\n@tf_export(v1=[\"train.NanTensorHook\"])\r\nclass NanTensorHook(session_run_hook.SessionRunHook):\r\n \"\"\"Monitors the loss tensor and stops training if loss is NaN.\r\n\r\n Can either fail with exception or just stop training.\r\n \"\"\"\r\n\r\n def __init__(self, loss_tensor, fail_on_nan_loss=True):\r\n \"\"\"Initializes a `NanTensorHook`.\r\n\r\n Args:\r\n loss_tensor: `Tensor`, the loss tensor.\r\n fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.\r\n \"\"\"\r\n self._loss_tensor = loss_tensor\r\n self._fail_on_nan_loss = fail_on_nan_loss\r\n\r\n def before_run(self, run_context): # pylint: disable=unused-argument\r\n return SessionRunArgs(self._loss_tensor)\r\n\r\n def after_run(self, run_context, run_values):\r\n if np.isnan(run_values.results):\r\n failure_message = \"Model diverged with loss = NaN.\"\r\n if self._fail_on_nan_loss:\r\n logging.error(failure_message)\r\n raise NanLossDuringTrainingError\r\n else:\r\n logging.warning(failure_message)\r\n # We don't raise an error but we request stop without an exception.\r\n run_context.request_stop()\r\n\r\n\r\n@tf_export(v1=[\"train.SummarySaverHook\"])\r\nclass SummarySaverHook(session_run_hook.SessionRunHook):\r\n \"\"\"Saves summaries every N steps.\"\"\"\r\n\r\n def __init__(self,\r\n save_steps=None,\r\n save_secs=None,\r\n output_dir=None,\r\n summary_writer=None,\r\n scaffold=None,\r\n summary_op=None):\r\n \"\"\"Initializes a `SummarySaverHook`.\r\n\r\n Args:\r\n save_steps: `int`, save summaries every N steps. Exactly one of\r\n `save_secs` and `save_steps` should be set.\r\n save_secs: `int`, save summaries every N seconds.\r\n output_dir: `string`, the directory to save the summaries to. Only used if\r\n no `summary_writer` is supplied.\r\n summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,\r\n one will be created accordingly.\r\n scaffold: `Scaffold` to get summary_op if it's not provided.\r\n summary_op: `Tensor` of type `string` containing the serialized `Summary`\r\n protocol buffer or a list of `Tensor`. They are most likely an output by\r\n TF summary methods like `tf.compat.v1.summary.scalar` or\r\n `tf.compat.v1.summary.merge_all`. 
It can be passed in as one tensor; if\r\n more than one, they must be passed in as a list.\r\n\r\n Raises:\r\n ValueError: Exactly one of scaffold or summary_op should be set.\r\n \"\"\"\r\n if ((scaffold is None and summary_op is None) or\r\n (scaffold is not None and summary_op is not None)):\r\n raise ValueError(\r\n \"Exactly one of scaffold or summary_op must be provided.\")\r\n self._summary_op = summary_op\r\n self._summary_writer = summary_writer\r\n self._output_dir = output_dir\r\n self._scaffold = scaffold\r\n self._timer = SecondOrStepTimer(\r\n every_secs=save_secs, every_steps=save_steps)\r\n # TODO(mdan): Throw an error if output_dir and summary_writer are None.\r\n\r\n def begin(self):\r\n if self._summary_writer is None and self._output_dir:\r\n self._summary_writer = SummaryWriterCache.get(self._output_dir)\r\n self._next_step = None\r\n self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access\r\n if self._global_step_tensor is None:\r\n raise RuntimeError(\r\n \"Global step should be created to use SummarySaverHook.\")\r\n\r\n def before_run(self, run_context): # pylint: disable=unused-argument\r\n self._request_summary = (\r\n self._next_step is None or\r\n self._timer.should_trigger_for_step(self._next_step))\r\n requests = {\"global_step\": self._global_step_tensor}\r\n if self._request_summary:\r\n if self._get_summary_op() is not None:\r\n requests[\"summary\"] = self._get_summary_op()\r\n\r\n return SessionRunArgs(requests)\r\n\r\n def after_run(self, run_context, run_values):\r\n _ = run_context\r\n if not self._summary_writer:\r\n return\r\n\r\n stale_global_step = run_values.results[\"global_step\"]\r\n global_step = stale_global_step + 1\r\n if self._next_step is None or self._request_summary:\r\n global_step = run_context.session.run(self._global_step_tensor)\r\n\r\n if self._next_step is None:\r\n self._summary_writer.add_session_log(\r\n SessionLog(status=SessionLog.START), global_step)\r\n\r\n if self._request_summary:\r\n self._timer.update_last_triggered_step(global_step)\r\n if \"summary\" in run_values.results:\r\n for summary in run_values.results[\"summary\"]:\r\n self._summary_writer.add_summary(summary, global_step)\r\n\r\n self._next_step = global_step + 1\r\n\r\n def end(self, session=None):\r\n if self._summary_writer:\r\n self._summary_writer.flush()\r\n\r\n def _get_summary_op(self):\r\n \"\"\"Fetches the summary op either from self._summary_op or self._scaffold.\r\n\r\n Returns:\r\n Returns a list of summary `Tensor`.\r\n \"\"\"\r\n summary_op = None\r\n if self._summary_op is not None:\r\n summary_op = self._summary_op\r\n elif self._scaffold.summary_op is not None:\r\n summary_op = self._scaffold.summary_op\r\n\r\n if summary_op is None:\r\n return None\r\n\r\n if not isinstance(summary_op, list):\r\n return [summary_op]\r\n return summary_op\r\n\r\n\r\n@tf_export(v1=[\"train.GlobalStepWaiterHook\"])\r\nclass GlobalStepWaiterHook(session_run_hook.SessionRunHook):\r\n \"\"\"Delays execution until global step reaches `wait_until_step`.\r\n\r\n This hook delays execution until global step reaches to `wait_until_step`. It\r\n is used to gradually start workers in distributed settings. 
One example usage\r\n would be setting `wait_until_step=int(K*log(task_id+1))` assuming that\r\n task_id=0 is the chief.\r\n \"\"\"\r\n\r\n def __init__(self, wait_until_step):\r\n \"\"\"Initializes a `GlobalStepWaiterHook`.\r\n\r\n Args:\r\n wait_until_step: an `int` shows until which global step should we wait.\r\n \"\"\"\r\n self._wait_until_step = wait_until_step\r\n\r\n def begin(self):\r\n self._worker_is_started = False\r\n self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access\r\n if self._global_step_tensor is None:\r\n raise RuntimeError(\r\n \"Global step should be created to use _GlobalStepWaiterHook.\")\r\n\r\n def before_run(self, run_context):\r\n if self._worker_is_started:\r\n return None\r\n\r\n if self._wait_until_step <= 0:\r\n self._worker_is_started = True\r\n return None\r\n\r\n logging.info(\"Waiting for global step %d before starting training.\",\r\n self._wait_until_step)\r\n last_logged_step = 0\r\n while True:\r\n current_step = run_context.session.run(self._global_step_tensor)\r\n if current_step >= self._wait_until_step:\r\n self._worker_is_started = True\r\n return None\r\n if current_step - last_logged_step > 1000:\r\n logging.info(\r\n \"Waiting for global step %d before starting training. \"\r\n \"Current step is %d.\", self._wait_until_step, current_step)\r\n last_logged_step = current_step\r\n time.sleep(0.5)\r\n\r\n\r\n@tf_export(v1=[\"train.FinalOpsHook\"])\r\nclass FinalOpsHook(session_run_hook.SessionRunHook):\r\n \"\"\"A hook which evaluates `Tensors` at the end of a session.\"\"\"\r\n\r\n def __init__(self, final_ops, final_ops_feed_dict=None):\r\n \"\"\"Initializes `FinalOpHook` with ops to run at the end of the session.\r\n\r\n Args:\r\n final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names\r\n to `Tensors`.\r\n final_ops_feed_dict: A feed dictionary to use when running\r\n `final_ops_dict`.\r\n \"\"\"\r\n self._final_ops = final_ops\r\n self._final_ops_feed_dict = final_ops_feed_dict\r\n self._final_ops_values = None\r\n\r\n @property\r\n def final_ops_values(self):\r\n return self._final_ops_values\r\n\r\n def end(self, session):\r\n if self._final_ops is not None:\r\n try:\r\n self._final_ops_values = session.run(\r\n self._final_ops, feed_dict=self._final_ops_feed_dict)\r\n except (errors.OutOfRangeError, StopIteration) as e:\r\n logging.warning(\r\n \"An OutOfRangeError or StopIteration exception is raised by the \"\r\n \"code in FinalOpsHook. This typically means the Ops running by the \"\r\n \"FinalOpsHook have a dependency back to some input source, which \"\r\n \"should not happen. For example, for metrics in \"\r\n \"tf.estimator.Estimator, all metrics functions return two Ops: \"\r\n \"`value_op` and `update_op`. Estimator.evaluate calls the \"\r\n \"`update_op` for each batch of the data in input source and, once \"\r\n \"it is exhausted, it call the `value_op` to get the metric values. \"\r\n \"The `value_op` here should have dependency back to variables \"\r\n \"reading only, rather than reading another batch from input. \"\r\n \"Otherwise, the `value_op`, executed by `FinalOpsHook`, triggers \"\r\n \"another data reading, which ends OutOfRangeError/StopIteration. 
\"\r\n \"Please fix that.\")\r\n raise e\r\n\r\n\r\n@tf_export(v1=[\"train.FeedFnHook\"])\r\nclass FeedFnHook(session_run_hook.SessionRunHook):\r\n \"\"\"Runs `feed_fn` and sets the `feed_dict` accordingly.\"\"\"\r\n\r\n def __init__(self, feed_fn):\r\n \"\"\"Initializes a `FeedFnHook`.\r\n\r\n Args:\r\n feed_fn: function that takes no arguments and returns `dict` of `Tensor`\r\n to feed.\r\n \"\"\"\r\n self.feed_fn = feed_fn\r\n\r\n def before_run(self, run_context): # pylint: disable=unused-argument\r\n return session_run_hook.SessionRunArgs(\r\n fetches=None, feed_dict=self.feed_fn())\r\n\r\n\r\n@tf_export(v1=[\"train.ProfilerHook\"])\r\nclass ProfilerHook(session_run_hook.SessionRunHook):\r\n \"\"\"Captures CPU/GPU profiling information every N steps or seconds.\r\n\r\n This produces files called \"timeline-<step>.json\", which are in Chrome\r\n Trace format.\r\n\r\n For more information see:\r\n https://github.com/catapult-project/catapult/blob/master/tracing/README.md\r\n \"\"\"\r\n\r\n def __init__(self,\r\n save_steps=None,\r\n save_secs=None,\r\n output_dir=\"\",\r\n show_dataflow=True,\r\n show_memory=False):\r\n \"\"\"Initializes a hook that takes periodic profiling snapshots.\r\n\r\n `options.run_metadata` argument of `tf.Session.Run` is used to collect\r\n metadata about execution. This hook sets the metadata and dumps it in Chrome\r\n Trace format.\r\n\r\n\r\n Args:\r\n save_steps: `int`, save profile traces every N steps. Exactly one of\r\n `save_secs` and `save_steps` should be set.\r\n save_secs: `int` or `float`, save profile traces every N seconds.\r\n output_dir: `string`, the directory to save the profile traces to.\r\n Defaults to the current directory.\r\n show_dataflow: `bool`, if True, add flow events to the trace connecting\r\n producers and consumers of tensors.\r\n show_memory: `bool`, if True, add object snapshot events to the trace\r\n showing the sizes and lifetimes of tensors.\r\n \"\"\"\r\n self._output_file = os.path.join(output_dir, \"timeline-{}.json\")\r\n self._file_writer = SummaryWriterCache.get(output_dir)\r\n self._show_dataflow = show_dataflow\r\n self._show_memory = show_memory\r\n self._timer = SecondOrStepTimer(\r\n every_secs=save_secs, every_steps=save_steps)\r\n\r\n def begin(self):\r\n self._next_step = None\r\n self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access\r\n if self._global_step_tensor is None:\r\n raise RuntimeError(\"Global step should be created to use ProfilerHook.\")\r\n\r\n def before_run(self, run_context):\r\n self._request_summary = (\r\n self._next_step is not None and\r\n self._timer.should_trigger_for_step(self._next_step))\r\n requests = {\"global_step\": self._global_step_tensor}\r\n opts = (\r\n config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)\r\n if self._request_summary else None)\r\n\r\n return SessionRunArgs(requests, options=opts)\r\n\r\n def after_run(self, run_context, run_values):\r\n stale_global_step = run_values.results[\"global_step\"]\r\n if self._next_step is None:\r\n # Update the timer so that it does not activate until N steps or seconds\r\n # have passed.\r\n self._timer.update_last_triggered_step(stale_global_step)\r\n global_step = stale_global_step + 1\r\n if self._request_summary:\r\n global_step = run_context.session.run(self._global_step_tensor)\r\n self._timer.update_last_triggered_step(global_step)\r\n self._save(global_step, self._output_file.format(global_step),\r\n run_values.run_metadata.step_stats)\r\n 
self._file_writer.add_run_metadata(run_values.run_metadata,\r\n \"step_%d\" % global_step)\r\n\r\n self._next_step = global_step + 1\r\n\r\n def _save(self, step, save_path, step_stats):\r\n logging.info(\"Saving timeline for %d into '%s'.\", step, save_path)\r\n with gfile.Open(save_path, \"w\") as f:\r\n trace = timeline.Timeline(step_stats)\r\n f.write(\r\n trace.generate_chrome_trace_format(\r\n show_dataflow=self._show_dataflow, show_memory=self._show_memory))\r\n\r\n\r\ndef _as_graph_element(obj):\r\n \"\"\"Retrieves Graph element.\"\"\"\r\n graph = ops.get_default_graph()\r\n if not isinstance(obj, six.string_types):\r\n if not hasattr(obj, \"graph\") or obj.graph != graph:\r\n raise ValueError(\"Passed %s should have graph attribute that is equal \"\r\n \"to current graph %s.\" % (obj, graph))\r\n return obj\r\n if \":\" in obj:\r\n element = graph.as_graph_element(obj)\r\n else:\r\n element = graph.as_graph_element(obj + \":0\")\r\n # Check that there is no :1 (e.g. it's single output).\r\n try:\r\n graph.as_graph_element(obj + \":1\")\r\n except (KeyError, ValueError):\r\n pass\r\n else:\r\n raise ValueError(\"Name %s is ambiguous, \"\r\n \"as this `Operation` has multiple outputs \"\r\n \"(at least 2).\" % obj)\r\n return element\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"`tf.Learn` components for `TPUGANEstimator`.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.gan.python.estimator.python import tpu_gan_estimator_impl\r\n# pylint: disable=wildcard-import\r\nfrom tensorflow.contrib.gan.python.estimator.python.tpu_gan_estimator_impl import *\r\n# pylint: enable=wildcard-import\r\nfrom tensorflow.python.util.all_util import remove_undocumented\r\n\r\n__all__ = tpu_gan_estimator_impl.__all__\r\nremove_undocumented(__name__, __all__)\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Metrics namespace.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n# pylint:disable=wildcard-import\r\nfrom tensorflow.contrib.eager.python.metrics_impl import *\r\nfrom tensorflow.python.util.all_util import remove_undocumented\r\n\r\n_allowed_symbols = ['Accuracy', 'Mean', 'Metric', 'CategoricalAccuracy',\r\n 'BinaryAccuracy', 'SparseAccuracy']\r\nremove_undocumented(__name__, _allowed_symbols)\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for cross_device_utils.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom absl.testing import parameterized\r\n\r\nfrom tensorflow.python.distribute import combinations\r\nfrom tensorflow.python.distribute import cross_device_utils\r\nfrom tensorflow.python.distribute import device_util\r\nfrom tensorflow.python.distribute import values as value_lib\r\nfrom tensorflow.python.eager import test\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import math_ops\r\n\r\n\r\nclass IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):\r\n\r\n def _assert_values_equal(self, left, right):\r\n self.assertAllEqual(\r\n self.evaluate(ops.convert_to_tensor(left)),\r\n self.evaluate(ops.convert_to_tensor(right)))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testAggregateTensors(self):\r\n t0 = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])\r\n t1 = constant_op.constant([[0., 0.], [5, 6], [7., 8.]])\r\n total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])\r\n result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1])\r\n self._assert_values_equal(total, result)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testAggregateIndexedSlices(self):\r\n t0 = math_ops._as_indexed_slices(\r\n constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))\r\n t1 = math_ops._as_indexed_slices(\r\n constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))\r\n total 
= constant_op.constant([[1., 2.], [5, 6], [10., 12.]])\r\n result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1])\r\n self.assertIsInstance(result, ops.IndexedSlices)\r\n self._assert_values_equal(total, result)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testDivideTensor(self):\r\n t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])\r\n n = 2\r\n expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])\r\n result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)\r\n self._assert_values_equal(expected, result)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testDivideIndexedSlices(self):\r\n t = math_ops._as_indexed_slices(\r\n constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))\r\n n = 2\r\n expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])\r\n result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)\r\n self.assertIsInstance(result, ops.IndexedSlices)\r\n self._assert_values_equal(expected, result)\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testIsIndexedSlices(self):\r\n t = math_ops._as_indexed_slices(\r\n constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))\r\n self.assertTrue(cross_device_utils.contains_indexed_slices(t))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testContainsIndexedSlices_List(self):\r\n t0 = math_ops._as_indexed_slices(\r\n constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))\r\n t1 = math_ops._as_indexed_slices(\r\n constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))\r\n self.assertTrue(cross_device_utils.contains_indexed_slices([t0, t1]))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testContainsIndexedSlices_Tuple(self):\r\n t0 = math_ops._as_indexed_slices(\r\n constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))\r\n t1 = math_ops._as_indexed_slices(\r\n constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))\r\n self.assertTrue(cross_device_utils.contains_indexed_slices((t0, t1)))\r\n\r\n @test_util.run_in_graph_and_eager_modes\r\n def testContainsIndexedSlices_PerReplica(self):\r\n t0 = math_ops._as_indexed_slices(\r\n constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))\r\n t1 = math_ops._as_indexed_slices(\r\n constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))\r\n device_map = value_lib.ReplicaDeviceMap((\"/gpu:0\", \"/cpu:0\"))\r\n per_replica = value_lib.PerReplica(device_map, (t0, t1))\r\n self.assertTrue(cross_device_utils.contains_indexed_slices(per_replica))\r\n\r\n @combinations.generate(combinations.combine(\r\n mode=[\"graph\", \"eager\"],\r\n required_gpus=1))\r\n def testCopyTensor(self):\r\n with ops.device(\"/cpu:0\"):\r\n t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])\r\n destination = \"/gpu:0\"\r\n result = cross_device_utils.copy_tensor_or_indexed_slices_to_device(\r\n t, destination)\r\n\r\n self._assert_values_equal(t, result)\r\n self.assertEqual(device_util.resolve(destination),\r\n device_util.resolve(result.device))\r\n\r\n @combinations.generate(combinations.combine(\r\n mode=[\"graph\", \"eager\"],\r\n required_gpus=1))\r\n def testCopyIndexedSlices(self):\r\n with ops.device(\"/cpu:0\"):\r\n t = math_ops._as_indexed_slices(\r\n constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))\r\n destination = \"/gpu:0\"\r\n result = cross_device_utils.copy_tensor_or_indexed_slices_to_device(\r\n t, destination)\r\n\r\n self.assertIsInstance(result, ops.IndexedSlices)\r\n self._assert_values_equal(t, result)\r\n self.assertEqual(device_util.resolve(destination),\r\n device_util.resolve(result.device))\r\n\r\n\r\nif 
__name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"A more advanced example, of building an RNN-based time series model.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport functools\r\nfrom os import path\r\nimport tempfile\r\n\r\nimport numpy\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators\r\nfrom tensorflow.contrib.timeseries.python.timeseries import model as ts_model\r\nfrom tensorflow.contrib.timeseries.python.timeseries import state_management\r\n\r\ntry:\r\n import matplotlib # pylint: disable=g-import-not-at-top\r\n matplotlib.use(\"TkAgg\") # Need Tk for interactive plots.\r\n from matplotlib import pyplot # pylint: disable=g-import-not-at-top\r\n HAS_MATPLOTLIB = True\r\nexcept ImportError:\r\n # Plotting requires matplotlib, but the unit test running this code may\r\n # execute in an environment without it (i.e. matplotlib is not a build\r\n # dependency). We'd still like to test the TensorFlow-dependent parts of this\r\n # example.\r\n HAS_MATPLOTLIB = False\r\n\r\n_MODULE_PATH = path.dirname(__file__)\r\n_DATA_FILE = path.join(_MODULE_PATH, \"data/multivariate_periods.csv\")\r\n\r\n\r\nclass _LSTMModel(ts_model.SequentialTimeSeriesModel):\r\n \"\"\"A time series model-building example using an RNNCell.\"\"\"\r\n\r\n def __init__(self, num_units, num_features, exogenous_feature_columns=None,\r\n dtype=tf.float32):\r\n \"\"\"Initialize/configure the model object.\r\n\r\n Note that we do not start graph building here. Rather, this object is a\r\n configurable factory for TensorFlow graphs which are run by an Estimator.\r\n\r\n Args:\r\n num_units: The number of units in the model's LSTMCell.\r\n num_features: The dimensionality of the time series (features per\r\n timestep).\r\n exogenous_feature_columns: A list of `tf.feature_column`s representing\r\n features which are inputs to the model but are not predicted by\r\n it. These must then be present for training, evaluation, and\r\n prediction.\r\n dtype: The floating point data type to use.\r\n \"\"\"\r\n super(_LSTMModel, self).__init__(\r\n # Pre-register the metrics we'll be outputting (just a mean here).\r\n train_output_names=[\"mean\"],\r\n predict_output_names=[\"mean\"],\r\n num_features=num_features,\r\n exogenous_feature_columns=exogenous_feature_columns,\r\n dtype=dtype)\r\n self._num_units = num_units\r\n # Filled in by initialize_graph()\r\n self._lstm_cell = None\r\n self._lstm_cell_run = None\r\n self._predict_from_lstm_output = None\r\n\r\n def initialize_graph(self, input_statistics=None):\r\n \"\"\"Save templates for components, which can then be used repeatedly.\r\n\r\n This method is called every time a new graph is created. 
It's safe to start\r\n adding ops to the current default graph here, but the graph should be\r\n constructed from scratch.\r\n\r\n Args:\r\n input_statistics: A math_utils.InputStatistics object.\r\n \"\"\"\r\n super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)\r\n with tf.variable_scope(\"\", use_resource=True):\r\n # Use ResourceVariables to avoid race conditions.\r\n self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)\r\n # Create templates so we don't have to worry about variable reuse.\r\n self._lstm_cell_run = tf.make_template(\r\n name_=\"lstm_cell\",\r\n func_=self._lstm_cell,\r\n create_scope_now_=True)\r\n # Transforms LSTM output into mean predictions.\r\n self._predict_from_lstm_output = tf.make_template(\r\n name_=\"predict_from_lstm_output\",\r\n func_=functools.partial(tf.layers.dense, units=self.num_features),\r\n create_scope_now_=True)\r\n\r\n def get_start_state(self):\r\n \"\"\"Return initial state for the time series model.\"\"\"\r\n return (\r\n # Keeps track of the time associated with this state for error checking.\r\n tf.zeros([], dtype=tf.int64),\r\n # The previous observation or prediction.\r\n tf.zeros([self.num_features], dtype=self.dtype),\r\n # The most recently seen exogenous features.\r\n tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),\r\n # The state of the RNNCell (batch dimension removed since this parent\r\n # class will broadcast).\r\n [tf.squeeze(state_element, axis=0)\r\n for state_element\r\n in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])\r\n\r\n def _filtering_step(self, current_times, current_values, state, predictions):\r\n \"\"\"Update model state based on observations.\r\n\r\n Note that we don't do much here aside from computing a loss. In this case\r\n it's easier to update the RNN state in _prediction_step, since that covers\r\n running the RNN both on observations (from this method) and our own\r\n predictions. This distinction can be important for probabilistic models,\r\n where repeatedly predicting without filtering should lead to low-confidence\r\n predictions.\r\n\r\n Args:\r\n current_times: A [batch size] integer Tensor.\r\n current_values: A [batch size, self.num_features] floating point Tensor\r\n with new observations.\r\n state: The model's state tuple.\r\n predictions: The output of the previous `_prediction_step`.\r\n Returns:\r\n A tuple of new state and a predictions dictionary updated to include a\r\n loss (note that we could also return other measures of goodness of fit,\r\n although only \"loss\" will be optimized).\r\n \"\"\"\r\n state_from_time, prediction, exogenous, lstm_state = state\r\n with tf.control_dependencies(\r\n [tf.assert_equal(current_times, state_from_time)]):\r\n # Subtract the mean and divide by the variance of the series. Slightly\r\n # more efficient if done for a whole window (using the normalize_features\r\n # argument to SequentialTimeSeriesModel).\r\n transformed_values = self._scale_data(current_values)\r\n # Use mean squared error across features for the loss.\r\n predictions[\"loss\"] = tf.reduce_mean(\r\n (prediction - transformed_values) ** 2, axis=-1)\r\n # Keep track of the new observation in model state. 
It won't be run\r\n # through the LSTM until the next _imputation_step.\r\n new_state_tuple = (current_times, transformed_values,\r\n exogenous, lstm_state)\r\n return (new_state_tuple, predictions)\r\n\r\n def _prediction_step(self, current_times, state):\r\n \"\"\"Advance the RNN state using a previous observation or prediction.\"\"\"\r\n _, previous_observation_or_prediction, exogenous, lstm_state = state\r\n # Update LSTM state based on the most recent exogenous and endogenous\r\n # features.\r\n inputs = tf.concat([previous_observation_or_prediction, exogenous],\r\n axis=-1)\r\n lstm_output, new_lstm_state = self._lstm_cell_run(\r\n inputs=inputs, state=lstm_state)\r\n next_prediction = self._predict_from_lstm_output(lstm_output)\r\n new_state_tuple = (current_times, next_prediction,\r\n exogenous, new_lstm_state)\r\n return new_state_tuple, {\"mean\": self._scale_back_data(next_prediction)}\r\n\r\n def _imputation_step(self, current_times, state):\r\n \"\"\"Advance model state across a gap.\"\"\"\r\n # Does not do anything special if we're jumping across a gap. More advanced\r\n # models, especially probabilistic ones, would want a special case that\r\n # depends on the gap size.\r\n return state\r\n\r\n def _exogenous_input_step(\r\n self, current_times, current_exogenous_regressors, state):\r\n \"\"\"Save exogenous regressors in model state for use in _prediction_step.\"\"\"\r\n state_from_time, prediction, _, lstm_state = state\r\n return (state_from_time, prediction,\r\n current_exogenous_regressors, lstm_state)\r\n\r\n\r\ndef train_and_predict(\r\n csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,\r\n export_directory=None):\r\n \"\"\"Train and predict using a custom time series model.\"\"\"\r\n # Construct an Estimator from our LSTM model.\r\n categorical_column = tf.feature_column.categorical_column_with_hash_bucket(\r\n key=\"categorical_exogenous_feature\", hash_bucket_size=16)\r\n exogenous_feature_columns = [\r\n # Exogenous features are not part of the loss, but can inform\r\n # predictions. In this example the features have no extra information, but\r\n # are included as an API example.\r\n tf.feature_column.numeric_column(\r\n \"2d_exogenous_feature\", shape=(2,)),\r\n tf.feature_column.embedding_column(\r\n categorical_column=categorical_column, dimension=10)]\r\n estimator = ts_estimators.TimeSeriesRegressor(\r\n model=_LSTMModel(\r\n num_features=5,\r\n num_units=128,\r\n exogenous_feature_columns=exogenous_feature_columns),\r\n optimizer=tf.compat.v1.train.AdamOptimizer(0.001),\r\n config=estimator_config,\r\n # Set state to be saved across windows.\r\n state_manager=state_management.ChainingStateManager())\r\n reader = tf.contrib.timeseries.CSVReader(\r\n csv_file_name,\r\n column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)\r\n + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5\r\n + (\"2d_exogenous_feature\",) * 2\r\n + (\"categorical_exogenous_feature\",)),\r\n # Data types other than for `times` need to be specified if they aren't\r\n # float32. 
In this case one of our exogenous features has string dtype.\r\n column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))\r\n train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(\r\n reader, batch_size=4, window_size=32)\r\n estimator.train(input_fn=train_input_fn, steps=training_steps)\r\n evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)\r\n evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)\r\n # Predict starting after the evaluation\r\n predict_exogenous_features = {\r\n \"2d_exogenous_feature\": numpy.concatenate(\r\n [numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],\r\n axis=-1),\r\n \"categorical_exogenous_feature\": numpy.array(\r\n [\"strkey\"] * 100)[None, :, None]}\r\n (predictions,) = tuple(estimator.predict(\r\n input_fn=tf.contrib.timeseries.predict_continuation_input_fn(\r\n evaluation, steps=100,\r\n exogenous_features=predict_exogenous_features)))\r\n times = evaluation[\"times\"][0]\r\n observed = evaluation[\"observed\"][0, :, :]\r\n predicted_mean = numpy.squeeze(numpy.concatenate(\r\n [evaluation[\"mean\"][0], predictions[\"mean\"]], axis=0))\r\n all_times = numpy.concatenate([times, predictions[\"times\"]], axis=0)\r\n\r\n # Export the model in SavedModel format. We include a bit of extra boilerplate\r\n # for \"cold starting\" as if we didn't have any state from the Estimator, which\r\n # is the case when serving from a SavedModel. If Estimator output is\r\n # available, the result of \"Estimator.evaluate\" can be passed directly to\r\n # `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the\r\n # `continue_from` argument.\r\n with tf.Graph().as_default():\r\n filter_feature_tensors, _ = evaluation_input_fn()\r\n with tf.train.MonitoredSession() as session:\r\n # Fetch the series to \"warm up\" our state, which will allow us to make\r\n # predictions for its future values. This is just a dictionary of times,\r\n # values, and exogenous features mapping to numpy arrays. 
The use of an\r\n # input_fn is just a convenience for the example; they can also be\r\n # specified manually.\r\n filter_features = session.run(filter_feature_tensors)\r\n if export_directory is None:\r\n export_directory = tempfile.mkdtemp()\r\n input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()\r\n export_location = estimator.export_saved_model(export_directory,\r\n input_receiver_fn)\r\n # Warm up and predict using the SavedModel\r\n with tf.Graph().as_default():\r\n with tf.compat.v1.Session() as session:\r\n signatures = tf.saved_model.loader.load(\r\n session, [tf.saved_model.tag_constants.SERVING], export_location)\r\n state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(\r\n signatures=signatures, session=session, features=filter_features)\r\n saved_model_output = (\r\n tf.contrib.timeseries.saved_model_utils.predict_continuation(\r\n continue_from=state, signatures=signatures,\r\n session=session, steps=100,\r\n exogenous_features=predict_exogenous_features))\r\n # The exported model gives the same results as the Estimator.predict()\r\n # call above.\r\n numpy.testing.assert_allclose(\r\n predictions[\"mean\"],\r\n numpy.squeeze(saved_model_output[\"mean\"], axis=0))\r\n return times, observed, all_times, predicted_mean\r\n\r\n\r\ndef main(unused_argv):\r\n if not HAS_MATPLOTLIB:\r\n raise ImportError(\r\n \"Please install matplotlib to generate a plot from this example.\")\r\n (observed_times, observations,\r\n all_times, predictions) = train_and_predict()\r\n pyplot.axvline(99, linestyle=\"dotted\")\r\n observed_lines = pyplot.plot(\r\n observed_times, observations, label=\"Observed\", color=\"k\")\r\n predicted_lines = pyplot.plot(\r\n all_times, predictions, label=\"Predicted\", color=\"b\")\r\n pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],\r\n loc=\"upper left\")\r\n pyplot.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tf.compat.v1.app.run(main=main)\r\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for Keras metrics.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom absl.testing import parameterized\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.distribute import combinations\r\nfrom tensorflow.python.distribute import strategy_combinations\r\nfrom tensorflow.python.eager import test\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.keras import metrics\r\nfrom tensorflow.python.ops import math_ops\r\n\r\n\r\ndef _labeled_dataset_fn():\r\n # First four batches of x: labels, predictions -> (labels == predictions)\r\n # 0: 0, 0 -> True; 1: 1, 1 -> True; 2: 2, 2 -> True; 3: 3, 0 -> False\r\n # 4: 4, 1 -> False; 5: 0, 2 -> False; 6: 1, 0 -> False; 7: 2, 1 -> False\r\n # 8: 3, 2 -> False; 9: 4, 0 -> False; 10: 0, 1 -> False; 11: 1, 2 -> False\r\n # 12: 2, 0 -> False; 13: 3, 1 -> False; 14: 4, 2 -> False; 15: 0, 0 -> True\r\n return dataset_ops.Dataset.range(1000).map(\r\n lambda x: {\"labels\": x % 5, \"predictions\": x % 3}).batch(\r\n 4, drop_remainder=True)\r\n\r\n\r\ndef _boolean_dataset_fn():\r\n # First four batches of labels, predictions: {TP, FP, TN, FN}\r\n # with a threshold of 0.5:\r\n # T, T -> TP; F, T -> FP; T, F -> FN\r\n # F, F -> TN; T, T -> TP; F, T -> FP\r\n # T, F -> FN; F, F -> TN; T, T -> TP\r\n # F, T -> FP; T, F -> FN; F, F -> TN\r\n return dataset_ops.Dataset.from_tensor_slices({\r\n \"labels\": [True, False, True, False],\r\n \"predictions\": [True, True, False, False]}).repeat().batch(\r\n 3, drop_remainder=True)\r\n\r\n\r\ndef _threshold_dataset_fn():\r\n # First four batches of labels, predictions: {TP, FP, TN, FN}\r\n # with a threshold of 0.5:\r\n # True, 1.0 -> TP; False, .75 -> FP; True, .25 -> FN\r\n # False, 0.0 -> TN; True, 1.0 -> TP; False, .75 -> FP\r\n # True, .25 -> FN; False, 0.0 -> TN; True, 1.0 -> TP\r\n # False, .75 -> FP; True, .25 -> FN; False, 0.0 -> TN\r\n return dataset_ops.Dataset.from_tensor_slices({\r\n \"labels\": [True, False, True, False],\r\n \"predictions\": [1.0, 0.75, 0.25, 0.]}).repeat().batch(\r\n 3, drop_remainder=True)\r\n\r\n\r\ndef _regression_dataset_fn():\r\n return dataset_ops.Dataset.from_tensor_slices({\r\n \"labels\": [1., .5, 1., 0.],\r\n \"predictions\": [1., .75, .25, 0.]}).repeat()\r\n\r\n\r\ndef all_combinations():\r\n return combinations.combine(\r\n distribution=[\r\n strategy_combinations.default_strategy,\r\n strategy_combinations.one_device_strategy,\r\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\r\n strategy_combinations.mirrored_strategy_with_two_gpus,\r\n ],\r\n mode=[\"graph\"])\r\n\r\n\r\ndef tpu_combinations():\r\n return combinations.combine(\r\n distribution=[\r\n strategy_combinations.tpu_strategy_one_step,\r\n ],\r\n mode=[\"graph\"])\r\n\r\n\r\nclass 
KerasMetricsTest(test.TestCase, parameterized.TestCase):\r\n\r\n def _test_metric(self, distribution, dataset_fn, metric_init_fn, expected_fn):\r\n with ops.Graph().as_default(), distribution.scope():\r\n metric = metric_init_fn()\r\n\r\n iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())\r\n updates = distribution.experimental_local_results(\r\n distribution.experimental_run_v2(\r\n metric, args=(iterator.get_next(),)))\r\n batches_per_update = distribution.num_replicas_in_sync\r\n\r\n self.evaluate(iterator.initialize())\r\n self.evaluate([v.initializer for v in metric.variables])\r\n\r\n batches_consumed = 0\r\n for i in range(4):\r\n batches_consumed += batches_per_update\r\n self.evaluate(updates)\r\n self.assertAllClose(expected_fn(batches_consumed),\r\n self.evaluate(metric.result()),\r\n 0.001,\r\n msg=\"After update #\" + str(i+1))\r\n if batches_consumed >= 4: # Consume 4 input batches in total.\r\n break\r\n\r\n @combinations.generate(all_combinations() + tpu_combinations())\r\n def testMean(self, distribution):\r\n def _dataset_fn():\r\n return dataset_ops.Dataset.range(1000).map(math_ops.to_float).batch(\r\n 4, drop_remainder=True)\r\n\r\n def _expected_fn(num_batches):\r\n # Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc.\r\n return num_batches * 2 - 0.5\r\n\r\n self._test_metric(distribution, _dataset_fn, metrics.Mean, _expected_fn)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for basic building blocks used in eager mode RevNet.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport gc\r\nimport time\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.contrib.eager.python.examples.revnet import blocks_test\r\nfrom tensorflow.contrib.eager.python.examples.revnet import config as config_\r\nfrom tensorflow.contrib.eager.python.examples.revnet import revnet\r\nfrom tensorflow.python.client import device_lib\r\ntfe = tf.contrib.eager\r\n\r\n\r\ndef train_one_iter(model, inputs, labels, optimizer, global_step=None):\r\n \"\"\"Train for one iteration.\"\"\"\r\n logits, saved_hidden = model(inputs)\r\n grads, loss = model.compute_gradients(\r\n saved_hidden=saved_hidden, labels=labels)\r\n optimizer.apply_gradients(\r\n zip(grads, model.trainable_variables), global_step=global_step)\r\n\r\n return logits, loss\r\n\r\n\r\nclass RevNetTest(tf.test.TestCase):\r\n\r\n def setUp(self):\r\n super(RevNetTest, self).setUp()\r\n config = config_.get_hparams_cifar_38()\r\n config.add_hparam(\"n_classes\", 10)\r\n config.add_hparam(\"dataset\", \"cifar-10\")\r\n # Reconstruction could cause numerical error, use double precision for tests\r\n config.dtype = tf.float64\r\n config.fused = False # Fused batch norm does not support 
tf.float64\r\n # Reduce the batch size for tests because the OSS version runs\r\n # in constrained GPU environment with 1-2GB of memory.\r\n config.batch_size = 2\r\n shape = (config.batch_size,) + config.input_shape\r\n self.model = revnet.RevNet(config=config)\r\n self.x = tf.random_normal(shape=shape, dtype=tf.float64)\r\n self.t = tf.random_uniform(\r\n shape=[config.batch_size],\r\n minval=0,\r\n maxval=config.n_classes,\r\n dtype=tf.int64)\r\n self.config = config\r\n\r\n def tearDown(self):\r\n del self.model\r\n del self.x\r\n del self.t\r\n del self.config\r\n super(RevNetTest, self).tearDown()\r\n\r\n def test_call(self):\r\n \"\"\"Test `call` function.\"\"\"\r\n\r\n y, _ = self.model(self.x, training=False)\r\n self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])\r\n\r\n def _check_grad_angle_combined(self, grads, grads_true):\r\n \"\"\"Verify that the reconstructed gradients has correct direction.\r\n\r\n Due to numerical imprecision, the magnitude may be slightly different.\r\n Yet according to the paper, the angle should be roughly the same.\r\n\r\n Args:\r\n grads: list of gradients from reconstruction\r\n grads_true: list of true gradients\r\n \"\"\"\r\n\r\n def _combine(gs):\r\n return [tf.reshape(g, [-1]) for g in gs]\r\n\r\n g1_all = tf.concat(_combine(grads), axis=0)\r\n g2_all = tf.concat(_combine(grads_true), axis=0)\r\n\r\n self.assertEqual(len(g1_all.shape), 1)\r\n self.assertEqual(len(g2_all.shape), 1)\r\n\r\n degree = blocks_test.compute_degree(g1_all, g2_all)\r\n self.assertLessEqual(degree, 1e0)\r\n\r\n def test_compute_gradients(self):\r\n \"\"\"Test `compute_gradients` function.\"\"\"\r\n _, saved_hidden = self.model(self.x) # Initialize model\r\n grads, loss = self.model.compute_gradients(\r\n saved_hidden=saved_hidden, labels=self.t)\r\n vars_ = self.model.trainable_variables\r\n self.assertTrue(isinstance(grads, list))\r\n self.assertTrue(isinstance(vars_, list))\r\n self.assertEqual(len(grads), len(vars_))\r\n for grad, var in zip(grads, vars_):\r\n self.assertEqual(grad.shape, var.shape)\r\n\r\n # Compare against the true gradient computed by the tape\r\n with tf.GradientTape() as tape:\r\n logits, _ = self.model(self.x)\r\n loss_true = self.model.compute_loss(logits=logits, labels=self.t)\r\n grads_true = tape.gradient(loss_true, vars_)\r\n self.assertAllClose(loss, loss_true)\r\n self.assertAllClose(grads, grads_true, rtol=1e-4, atol=1e-4)\r\n self._check_grad_angle_combined(grads, grads_true)\r\n\r\n def test_call_defun(self):\r\n \"\"\"Test `call` function with defun.\"\"\"\r\n y, _ = tfe.defun(self.model.call)(self.x, training=False)\r\n self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])\r\n\r\n def test_compute_gradients_defun(self):\r\n \"\"\"Test `compute_gradients` function with defun.\"\"\"\r\n # TODO(apassos): make cond support returning None to let this happen with\r\n # tf.function.\r\n compute_gradients = tfe.defun(self.model.compute_gradients)\r\n _, saved_hidden = self.model(self.x)\r\n grads, _ = compute_gradients(saved_hidden=saved_hidden, labels=self.t)\r\n vars_ = self.model.trainable_variables\r\n self.assertTrue(isinstance(grads, list))\r\n self.assertTrue(isinstance(vars_, list))\r\n self.assertEqual(len(grads), len(vars_))\r\n for grad, var in zip(grads, vars_):\r\n if grad is not None:\r\n self.assertEqual(grad.shape, var.shape)\r\n\r\n def test_training_graph(self):\r\n \"\"\"Test model training in graph mode.\"\"\"\r\n with tf.Graph().as_default():\r\n config = 
config_.get_hparams_cifar_38()\r\n config.add_hparam(\"n_classes\", 10)\r\n config.add_hparam(\"dataset\", \"cifar-10\")\r\n\r\n x = tf.random_normal(\r\n shape=(self.config.batch_size,) + self.config.input_shape)\r\n t = tf.random_uniform(\r\n shape=(self.config.batch_size,),\r\n minval=0,\r\n maxval=self.config.n_classes,\r\n dtype=tf.int32)\r\n global_step = tf.Variable(0., trainable=False)\r\n model = revnet.RevNet(config=config)\r\n _, saved_hidden = model(x)\r\n grads, _ = model.compute_gradients(saved_hidden=saved_hidden, labels=t)\r\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\r\n train_op = optimizer.apply_gradients(\r\n zip(grads, model.trainable_variables), global_step=global_step)\r\n\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n for _ in range(1):\r\n sess.run(train_op)\r\n\r\n\r\n# Benchmark related\r\ndef device_and_data_format():\r\n return (\"/gpu:0\",\r\n \"channels_first\") if tf.test.is_gpu_available() else (\"/cpu:0\",\r\n \"channels_last\")\r\n\r\n\r\ndef random_batch(batch_size, config):\r\n shape = (batch_size,) + config.input_shape\r\n images = tf.random_uniform(shape)\r\n labels = tf.random_uniform(\r\n [batch_size], minval=0, maxval=config.n_classes, dtype=tf.int32)\r\n\r\n return images, labels\r\n\r\n\r\nclass MockIterator(object):\r\n\r\n def __init__(self, tensors):\r\n self._tensors = [tf.identity(x) for x in tensors]\r\n\r\n def next(self):\r\n return self._tensors\r\n\r\n\r\nclass RevNetBenchmark(tf.test.Benchmark):\r\n \"\"\"Eager and graph benchmarks for RevNet.\"\"\"\r\n\r\n def _train_batch_sizes(self):\r\n \"\"\"Shamelessly copied from `resnet50_test.py`.\r\n\r\n Note: This is targeted towards ImageNet. CIFAR-10 should allow more\r\n aggressive batch sizes.\r\n\r\n Returns:\r\n A tuple of possible batch sizes\r\n \"\"\"\r\n for device in device_lib.list_local_devices():\r\n if tf.DeviceSpec.from_string(device.name).device_type == \"GPU\":\r\n if \"K20\" in device.physical_device_desc:\r\n return (16,)\r\n if \"P100\" in device.physical_device_desc:\r\n return (16, 32, 64)\r\n if tf.DeviceSpec.from_string(device.name).device_type == \"TPU\":\r\n return (32,)\r\n return (16, 32)\r\n\r\n def _force_device_sync(self):\r\n \"\"\"Shamelessly copied from `resnet50_test.py`.\"\"\"\r\n tf.constant(1.).cpu()\r\n\r\n def _report(self, label, start, num_iters, device, batch_size, data_format):\r\n avg_time = (time.time() - start) / num_iters\r\n dev = tf.DeviceSpec.from_string(device).device_type.lower()\r\n name = \"%s_%s_batch_%d_%s\" % (label, dev, batch_size, data_format)\r\n extras = {\"examples_per_sec\": batch_size / avg_time}\r\n self.report_benchmark(\r\n iters=num_iters, wall_time=avg_time, name=name, extras=extras)\r\n\r\n def _benchmark_eager_apply(self,\r\n label,\r\n device_and_format,\r\n defun=False,\r\n execution_mode=None):\r\n config = config_.get_hparams_imagenet_56()\r\n with tfe.execution_mode(execution_mode):\r\n device, data_format = device_and_format\r\n model = revnet.RevNet(config=config)\r\n if defun:\r\n # TODO(apassos): reenable after cond lets you return None\r\n model.call = tfe.defun(model.call)\r\n batch_size = 64\r\n num_burn = 5\r\n num_iters = 10\r\n with tf.device(device):\r\n images, _ = random_batch(batch_size, config)\r\n for _ in range(num_burn):\r\n model(images, training=False)\r\n if execution_mode:\r\n tfe.async_wait()\r\n gc.collect()\r\n start = time.time()\r\n for _ in range(num_iters):\r\n model(images, training=False)\r\n if execution_mode:\r\n tfe.async_wait()\r\n 
self._report(label, start, num_iters, device, batch_size, data_format)\r\n\r\n def benchmark_eager_apply_sync(self):\r\n self._benchmark_eager_apply(\r\n \"eager_apply_sync\", device_and_data_format(), defun=False)\r\n\r\n def benchmark_eager_apply_async(self):\r\n self._benchmark_eager_apply(\r\n \"eager_apply_async\",\r\n device_and_data_format(),\r\n defun=False,\r\n execution_mode=tfe.ASYNC)\r\n\r\n def benchmark_eager_call_defun(self):\r\n self._benchmark_eager_apply(\r\n \"eager_apply_with_defun\", device_and_data_format(), defun=True)\r\n\r\n def _benchmark_eager_train(self,\r\n label,\r\n make_iterator,\r\n device_and_format,\r\n defun=False,\r\n execution_mode=None):\r\n config = config_.get_hparams_imagenet_56()\r\n with tfe.execution_mode(execution_mode):\r\n device, data_format = device_and_format\r\n for batch_size in self._train_batch_sizes():\r\n (images, labels) = random_batch(batch_size, config)\r\n model = revnet.RevNet(config=config)\r\n optimizer = tf.train.GradientDescentOptimizer(0.1)\r\n if defun:\r\n model.call = tfe.function(model.call)\r\n\r\n num_burn = 3\r\n num_iters = 10\r\n with tf.device(device):\r\n iterator = make_iterator((images, labels))\r\n for _ in range(num_burn):\r\n (images, labels) = iterator.next()\r\n train_one_iter(model, images, labels, optimizer)\r\n if execution_mode:\r\n tfe.async_wait()\r\n self._force_device_sync()\r\n gc.collect()\r\n\r\n start = time.time()\r\n for _ in range(num_iters):\r\n (images, labels) = iterator.next()\r\n train_one_iter(model, images, labels, optimizer)\r\n if execution_mode:\r\n tfe.async_wait()\r\n self._force_device_sync()\r\n self._report(label, start, num_iters, device, batch_size, data_format)\r\n\r\n def benchmark_eager_train_sync(self):\r\n self._benchmark_eager_train(\r\n \"eager_train_sync\", MockIterator, device_and_data_format(), defun=False)\r\n\r\n def benchmark_eager_train_async(self):\r\n self._benchmark_eager_train(\r\n \"eager_train_async\",\r\n MockIterator,\r\n device_and_data_format(),\r\n defun=False,\r\n execution_mode=tfe.ASYNC)\r\n\r\n def benchmark_eager_train_defun(self):\r\n self._benchmark_eager_train(\r\n \"eager_train\", MockIterator, device_and_data_format(), defun=False)\r\n\r\n def benchmark_eager_train_datasets_with_defun(self):\r\n\r\n def make_iterator(tensors):\r\n with tf.device(\"/device:CPU:0\"):\r\n ds = tf.data.Dataset.from_tensors(tensors).repeat()\r\n return tfe.Iterator(ds)\r\n\r\n self._benchmark_eager_train(\r\n \"eager_train_dataset_with_defun\",\r\n make_iterator,\r\n device_and_data_format(),\r\n defun=True)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tf.enable_eager_execution()\r\n tf.test.main()\r\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"A dataset loader for imports85.data.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\ntry:\r\n import pandas as pd # pylint: disable=g-import-not-at-top\r\nexcept ImportError:\r\n pass\r\n\r\n\r\nURL = \"https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data\"\r\n\r\n# Order is important for the csv-readers, so we use an OrderedDict here.\r\ndefaults = collections.OrderedDict([\r\n (\"symboling\", [0]),\r\n (\"normalized-losses\", [0.0]),\r\n (\"make\", [\"\"]),\r\n (\"fuel-type\", [\"\"]),\r\n (\"aspiration\", [\"\"]),\r\n (\"num-of-doors\", [\"\"]),\r\n (\"body-style\", [\"\"]),\r\n (\"drive-wheels\", [\"\"]),\r\n (\"engine-location\", [\"\"]),\r\n (\"wheel-base\", [0.0]),\r\n (\"length\", [0.0]),\r\n (\"width\", [0.0]),\r\n (\"height\", [0.0]),\r\n (\"curb-weight\", [0.0]),\r\n (\"engine-type\", [\"\"]),\r\n (\"num-of-cylinders\", [\"\"]),\r\n (\"engine-size\", [0.0]),\r\n (\"fuel-system\", [\"\"]),\r\n (\"bore\", [0.0]),\r\n (\"stroke\", [0.0]),\r\n (\"compression-ratio\", [0.0]),\r\n (\"horsepower\", [0.0]),\r\n (\"peak-rpm\", [0.0]),\r\n (\"city-mpg\", [0.0]),\r\n (\"highway-mpg\", [0.0]),\r\n (\"price\", [0.0])\r\n]) # pyformat: disable\r\n\r\n\r\ntypes = collections.OrderedDict((key, type(value[0]))\r\n for key, value in defaults.items())\r\n\r\n\r\ndef _get_imports85():\r\n path = tf.contrib.keras.utils.get_file(URL.split(\"/\")[-1], URL)\r\n return path\r\n\r\n\r\ndef dataset(y_name=\"price\", train_fraction=0.7):\r\n \"\"\"Load the imports85 data as a (train,test) pair of `Dataset`.\r\n\r\n Each dataset generates (features_dict, label) pairs.\r\n\r\n Args:\r\n y_name: The name of the column to use as the label.\r\n train_fraction: A float, the fraction of data to use for training. 
The\r\n remainder will be used for evaluation.\r\n Returns:\r\n A (train,test) pair of `Datasets`\r\n \"\"\"\r\n # Download and cache the data\r\n path = _get_imports85()\r\n\r\n # Define how the lines of the file should be parsed\r\n def decode_line(line):\r\n \"\"\"Convert a csv line into a (features_dict,label) pair.\"\"\"\r\n # Decode the line to a tuple of items based on the types of\r\n # csv_header.values().\r\n items = tf.decode_csv(line, list(defaults.values()))\r\n\r\n # Convert the keys and items to a dict.\r\n pairs = zip(defaults.keys(), items)\r\n features_dict = dict(pairs)\r\n\r\n # Remove the label from the features_dict\r\n label = features_dict.pop(y_name)\r\n\r\n return features_dict, label\r\n\r\n def has_no_question_marks(line):\r\n \"\"\"Returns True if the line of text has no question marks.\"\"\"\r\n # split the line into an array of characters\r\n chars = tf.string_split(line[tf.newaxis], \"\").values\r\n # for each character check if it is a question mark\r\n is_question = tf.equal(chars, \"?\")\r\n any_question = tf.reduce_any(is_question)\r\n no_question = ~any_question\r\n\r\n return no_question\r\n\r\n def in_training_set(line):\r\n \"\"\"Returns a boolean tensor, true if the line is in the training set.\"\"\"\r\n # If you randomly split the dataset you won't get the same split in both\r\n # sessions if you stop and restart training later. Also a simple\r\n # random split won't work with a dataset that's too big to `.cache()` as\r\n # we are doing here.\r\n num_buckets = 1000000\r\n bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)\r\n # Use the hash bucket id as a random number that's deterministic per example\r\n return bucket_id < int(train_fraction * num_buckets)\r\n\r\n def in_test_set(line):\r\n \"\"\"Returns a boolean tensor, true if the line is in the training set.\"\"\"\r\n # Items not in the training set are in the test set.\r\n # This line must use `~` instead of `not` because `not` only works on python\r\n # booleans but we are dealing with symbolic tensors.\r\n return ~in_training_set(line)\r\n\r\n base_dataset = (\r\n tf.data\r\n # Get the lines from the file.\r\n .TextLineDataset(path)\r\n # drop lines with question marks.\r\n .filter(has_no_question_marks))\r\n\r\n train = (base_dataset\r\n # Take only the training-set lines.\r\n .filter(in_training_set)\r\n # Decode each line into a (features_dict, label) pair.\r\n .map(decode_line)\r\n # Cache data so you only decode the file once.\r\n .cache())\r\n\r\n # Do the same for the test-set.\r\n test = (base_dataset.filter(in_test_set).cache().map(decode_line))\r\n\r\n return train, test\r\n\r\n\r\ndef raw_dataframe():\r\n \"\"\"Load the imports85 data as a pd.DataFrame.\"\"\"\r\n # Download and cache the data\r\n path = _get_imports85()\r\n\r\n # Load it into a pandas dataframe\r\n df = pd.read_csv(path, names=types.keys(), dtype=types, na_values=\"?\")\r\n\r\n return df\r\n\r\n\r\ndef load_data(y_name=\"price\", train_fraction=0.7, seed=None):\r\n \"\"\"Get the imports85 data set.\r\n\r\n A description of the data is available at:\r\n https://archive.ics.uci.edu/ml/datasets/automobile\r\n\r\n The data itself can be found at:\r\n https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data\r\n\r\n Args:\r\n y_name: the column to return as the label.\r\n train_fraction: the fraction of the dataset to use for training.\r\n seed: The random seed to use when shuffling the data. 
`None` generates a\r\n unique shuffle every run.\r\n Returns:\r\n a pair of pairs where the first pair is the training data, and the second\r\n is the test data:\r\n `(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)`\r\n `x` contains a pandas DataFrame of features, while `y` contains the label\r\n array.\r\n \"\"\"\r\n # Load the raw data columns.\r\n data = raw_dataframe()\r\n\r\n # Delete rows with unknowns\r\n data = data.dropna()\r\n\r\n # Shuffle the data\r\n np.random.seed(seed)\r\n\r\n # Split the data into train/test subsets.\r\n x_train = data.sample(frac=train_fraction, random_state=seed)\r\n x_test = data.drop(x_train.index)\r\n\r\n # Extract the label from the features dataframe.\r\n y_train = x_train.pop(y_name)\r\n y_test = x_test.pop(y_name)\r\n\r\n return (x_train, y_train), (x_test, y_test)\r\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for `tf.data.Dataset.filter()`.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.data.kernel_tests import filter_test_base\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\n@test_util.run_all_in_graph_and_eager_modes\r\nclass FilterTest(filter_test_base.FilterTestBase):\r\n\r\n def apply_filter(self, input_dataset, predicate):\r\n return input_dataset.filter(predicate)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n" ]
[ [ "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.linalg_ops.eye", "tensorflow.contrib.timeseries.python.timeseries.state_space_models.state_space_model.StateSpaceModelConfiguration", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.ops.array_ops.pad", "tensorflow.contrib.timeseries.python.timeseries.math_utils.variable_covariance_matrix" ], [ "numpy.fft.fftshift", "numpy.fft.ifftshift", "tensorflow.python.platform.test.main", "numpy.zeros", "numpy.fft.fft2", "numpy.power", "tensorflow.python.ops.math_ops.complex", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.signal.fft_ops.ifftshift", "tensorflow.python.framework.dtypes.as_dtype", "numpy.fft.rfft2", "numpy.random.rand", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.fft.irfft2", "tensorflow.python.ops.math_ops.conj", "numpy.fft.ifft2", "numpy.ones", "tensorflow.python.ops.spectral_ops_test_util.fft_kernel_label_map", "tensorflow.python.ops.signal.fft_ops.fftshift", "numpy.prod", "numpy.random.uniform", "tensorflow.core.protobuf.config_pb2.ConfigProto" ], [ "tensorflow.python.data.experimental.ops.stats_aggregator.StatsAggregator", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors", "tensorflow.python.data.ops.dataset_ops.Options", "tensorflow.python.platform.test.main", "tensorflow.python.data.experimental.ops.optimization.assert_next" ], [ "tensorflow.python.util.deprecation.silence" ], [ "numpy.linalg.inv", "numpy.arange", "numpy.eye", "numpy.linalg.det", "tensorflow.python.ops.array_ops.matrix_band_part", "tensorflow.python.platform.test.main", "scipy.stats.multivariate_normal", "numpy.transpose", "tensorflow.python.ops.math_ops.matmul", "numpy.random.RandomState" ], [ "numpy.sqrt", "tensorflow.python.keras.initializers.ConstantV2", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.initializers.lecun_normalV2", "tensorflow.python.tf2.enable", "tensorflow.python.keras.initializers.ZerosV2", "tensorflow.python.keras.initializers.he_uniformV2", "tensorflow.python.keras.initializers.GlorotUniformV2", "tensorflow.python.platform.test.main", "tensorflow.python.keras.initializers.RandomUniformV2", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.keras.initializers.deserialize", "tensorflow.python.keras.initializers.lecun_uniformV2", "tensorflow.python.keras.initializers.TruncatedNormalV2", "tensorflow.python.keras.initializers.IdentityV2", "tensorflow.python.keras.Model", "tensorflow.python.keras.initializers.GlorotNormalV2", "tensorflow.python.keras.initializers.RandomNormalV2", "tensorflow.python.keras.backend.get_value", "tensorflow.python.keras.initializers.OnesV2", "tensorflow.python.keras.Input", "tensorflow.python.keras.initializers.OrthogonalV2", "tensorflow.python.keras.initializers.he_normalV2", "tensorflow.python.keras.initializers.get", "tensorflow.python.ops.init_ops._compute_fans" ], [ "tensorflow.contrib.rnn.python.kernel_tests.benchmarking.device", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.init_ops.random_uniform_initializer", "tensorflow.python.ops.array_ops.zeros", "numpy.random.randn", "tensorflow.contrib.rnn.python.ops.lstm_ops.LSTMBlockFusedCell", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.gradients_impl.gradients", 
"tensorflow.python.ops.rnn_cell.BasicLSTMCell", "tensorflow.python.ops.rnn_cell.LSTMCell", "tensorflow.python.ops.gen_bitwise_ops.bitwise_and", "tensorflow.python.platform.test.main", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.contrib.rnn.python.ops.lstm_ops.LSTMBlockCell", "tensorflow.python.ops.rnn.static_rnn", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.ops.gen_array_ops.bitcast", "tensorflow.python.client.session.Session", "tensorflow.python.ops.rnn.dynamic_rnn", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.array", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.init_ops.zeros_initializer", "numpy.ones", "tensorflow.contrib.rnn.python.kernel_tests.benchmarking.dict_product", "tensorflow.contrib.rnn.python.kernel_tests.benchmarking.seconds_per_run", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.keras.distribute.distributed_training_utils.set_distributed_function", "tensorflow.python.keras.backend.eager_learning_phase_scope", "tensorflow.python.keras.engine.training_eager.test_on_batch", "tensorflow.python.eager.def_function.function", "tensorflow.python.keras.engine.training_utils.prepare_sample_weight_modes", "tensorflow.python.keras.distribute.distributed_training_utils.get_distributed_function", "tensorflow.python.keras.distribute.distributed_training_utils.unwrap_output_dict", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.keras.engine.training_utils.cast_if_floating_dtype", "tensorflow.python.distribute.distribution_strategy_context.get_strategy", "tensorflow.python.keras.engine.training_eager.train_on_batch", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.keras.engine.training_utils.ModelInputs", "tensorflow.python.util.nest.flatten", "tensorflow.python.ops.math_ops.cast" ], [ "tensorflow.python.platform.tf_logging.log", "tensorflow.core.util.event_pb2.LogMessage" ], [ "tensorflow.python.util.tf_inspect.isgenerator", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.utils.generic_utils.Progbar", "numpy.random.seed" ], [ "tensorflow.python.ops.ragged.ragged_conversion_ops.from_tensor", "tensorflow.python.ops.ragged.ragged_factory_ops.constant", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.ragged.ragged_squeeze_op.squeeze", "tensorflow.python.platform.googletest.main", "tensorflow.python.ops.ragged.ragged_conversion_ops.to_tensor", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.fit", "tensorflow.enable_eager_execution", "tensorflow.contrib.eager.Iterator", "tensorflow.test.main", "tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.synthetic_dataset", "tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.LinearModel", "tensorflow.train.GradientDescentOptimizer", "tensorflow.contrib.eager.num_gpus", "tensorflow.random_uniform" ], [ "tensorflow.python.platform.test.main", "tensorflow.python.data.experimental.ops.unique.unique", "tensorflow.python.data.ops.dataset_ops.Dataset.range" ], [ "numpy.product", "numpy.random.seed", "tensorflow.python.platform.test.main", 
"tensorflow.contrib.metrics.python.ops.metric_ops.precision_recall_at_equal_thresholds", "tensorflow.python.ops.variables.local_variables_initializer", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.framework.ops.reset_default_graph" ], [ "tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensor_slices", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.data.util.convert.optional_param_to_tensor", "tensorflow.python.ops.gen_dataset_ops.fixed_length_record_dataset_v2", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.gen_dataset_ops.tf_record_dataset", "tensorflow.python.ops.gen_experimental_dataset_ops.parallel_interleave_dataset", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.gen_dataset_ops.text_line_dataset", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.data.ops.dataset_ops.get_legacy_output_shapes", "tensorflow.python.data.ops.dataset_ops.get_legacy_output_types" ], [ "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.variables.VariableV1", "tensorflow.python.training.server_lib.Server.create_local_server", "tensorflow.python.platform.test.main", "tensorflow.python.client.session.Session", "tensorflow.python.ops.math_ops.matmul" ], [ "numpy.int32", "tensorflow.contrib.distributions.python.ops.bijectors.permute.Permute", "tensorflow.python.ops.distributions.bijector_test_util.assert_bijective_and_finite", "tensorflow.python.ops.array_ops.placeholder", "numpy.random.randn", "tensorflow.python.platform.test.main", "numpy.random.RandomState" ], [ "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.summary.summary.scalar", "tensorflow.python.ops.math_ops.sqrt", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.math_ops.logical_not", "tensorflow.python.ops.math_ops.reduce_all", "tensorflow.python.ops.array_ops.fill", "tensorflow.python.training.training_util.get_global_step", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.ops.variable_scope.variable", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.contrib.factorization.python.ops.factorization_ops.WALSModel", "tensorflow.python.training.session_run_hook.SessionRunArgs" ], [ "tensorflow.python.ops.math_ops.range", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.gen_random_ops.random_gamma_grad", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.framework.ops.control_dependencies" ], [ "tensorflow.python.util.compat.as_text", "tensorflow.contrib.session_bundle.manifest_pb2.AssetFile", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.platform.gfile.Exists", "tensorflow.python.training.training_util.global_step", "tensorflow.python.platform.gfile.Remove", "tensorflow.python.training.saver.Saver", "tensorflow.contrib.session_bundle.gc.negation", "tensorflow.contrib.session_bundle.manifest_pb2.Signature", "tensorflow.python.platform.gfile.DeleteRecursively", "tensorflow.python.platform.gfile.MakeDirs", 
"tensorflow.contrib.session_bundle.manifest_pb2.Signatures", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.platform.gfile.Copy", "tensorflow.python.platform.gfile.Rename", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.contrib.session_bundle.gc.get_paths", "tensorflow.core.framework.graph_pb2.GraphDef" ], [ "tensorflow.python.training.checkpoint_utils._get_checkpoint_filename", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.ops.variable_scope._PartitionInfo", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.training.saver.object_graph_key_mapping", "tensorflow.python.training.checkpoint_utils._is_variable", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.training.checkpoint_utils.init_from_checkpoint", "tensorflow.python.training.saving.saveable_object_util.op_list_to_dict" ], [ "tensorflow.python.ops.gen_data_flow_ops.tensor_array_gather_v3", "tensorflow.python.ops.list_ops.tensor_list_split", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.list_ops.tensor_list_set_item", "tensorflow.python.ops.array_ops.split", "tensorflow.python.ops.list_ops.tensor_list_length", "tensorflow.python.ops.gen_data_flow_ops.tensor_array_read_v3", "tensorflow.python.framework.errors_impl.InvalidArgumentError", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.framework.type_spec.type_spec_from_value", "tensorflow.python.framework.ops.device", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.gen_data_flow_ops.tensor_array_v3", "tensorflow.python.ops.gen_data_flow_ops.tensor_array_concat_v3", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.list_ops.tensor_list_get_item", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.gen_data_flow_ops.tensor_array_grad_v3", "tensorflow.python.framework.type_spec.register_type_spec_from_value_converter", "tensorflow.python.ops.list_ops.tensor_list_concat", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.gen_data_flow_ops.tensor_array_scatter_v3", "tensorflow.python.ops.gen_data_flow_ops.tensor_array_size_v3", "tensorflow.python.ops.list_ops.tensor_list_gather", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.ops.list_ops.tensor_list_stack", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.gen_data_flow_ops.tensor_array_close_v3", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.ops.gen_control_flow_ops.no_op", "tensorflow.python.ops.list_ops.tensor_list_from_tensor", "tensorflow.python.ops.list_ops.tensor_list_scatter", "tensorflow.python.ops.list_ops.tensor_list_reserve", "tensorflow.python.ops.gen_data_flow_ops.tensor_array_write_v3", "tensorflow.python.ops.math_ops.range", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.ops.gen_data_flow_ops.tensor_array_split_v3", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.framework.errors_impl.OutOfRangeError", 
"tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.data.ops.dataset_ops.Dataset.zip", "tensorflow.python.platform.test.main", "tensorflow.python.data.ops.dataset_ops.Dataset.range" ], [ "tensorflow.python.keras.models.clone_model", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.keras.backend.name_scope", "tensorflow.python.keras.layers.merge.concatenate", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.layers.core.Lambda", "tensorflow.python.framework.ops.device", "tensorflow.python.keras.backend.get_session", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.keras.engine.training.Model" ], [ "tensorflow.python.ops.linalg.linear_operator_test_util.add_tests", "tensorflow.python.ops.linalg.linear_operator_householder.LinearOperatorHouseholder", "tensorflow.python.ops.linalg_ops.norm", "tensorflow.python.ops.variables.Variable", "tensorflow.python.platform.test.main", "tensorflow.python.ops.linalg.linear_operator_test_util.random_sign_uniform", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.ops.array_ops.placeholder_with_default", "tensorflow.python.ops.array_ops.matrix_diag_part" ], [ "tensorflow.python.ops.array_ops.gather", "tensorflow.python.platform.test.main", "tensorflow.python.data.experimental.ops.error_ops.ignore_errors", "tensorflow.python.data.ops.dataset_ops.Dataset.range" ], [ "tensorflow.python.ops.confusion_matrix.confusion_matrix" ], [ "tensorflow.python.training.server_lib.ClusterSpec", "tensorflow.python.util.tf_export.tf_export" ], [ "tensorflow.python.keras.optimizer_v2.gradient_descent.SGD", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.Model", "tensorflow.python.ops.variables.Variable", "tensorflow.python.keras.Sequential", "numpy.random.rand", "tensorflow.python.keras.layers.Input" ], [ "tensorflow.python.ops.nn_impl.batch_normalization", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.python.ops.random_ops.truncated_normal", "tensorflow.python.ops.nn_impl.moments", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.math_ops.rsqrt", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.client.session.Session", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.gen_nn_ops._batch_norm_with_global_normalization", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.state_ops.scatter_update", "tensorflow.python.ops.resource_variable_ops.resource_scatter_update", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.state_ops.scatter_add", "tensorflow.python.ops.math_ops.cast" ], [ "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.tensor_shape.TensorShape", 
"tensorflow.python.platform.googletest.main" ], [ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "numpy.sqrt", "numpy.linspace", "tensorflow.python.ops.distributions.kullback_leibler.kl_divergence", "tensorflow.python.ops.distributions.normal.Normal", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.ops.gradients_impl.gradients", "numpy.exp", "tensorflow.python.ops.nn_ops.softplus", "tensorflow.python.ops.distributions.normal.Normal.param_static_shapes", "numpy.ones_like", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.ones", "numpy.log", "tensorflow.python.ops.distributions.normal.NormalWithSoftplusScale", "tensorflow.python.ops.distributions.normal.Normal.param_shapes", "numpy.array", "numpy.random.RandomState", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.platform.app.run", "tensorflow.python.tools.strip_unused_lib.strip_unused_from_files" ], [ "tensorflow.python.platform.tf_logging.set_verbosity", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.eager.profiler_client.monitor", "tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver.TPUClusterResolver", "tensorflow.python.framework.errors.UnavailableError" ], [ "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.quantile_accumulator_is_initialized", "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.quantile_stream_resource_handle_op", "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.quantile_accumulator_add_summaries", "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.create_quantile_accumulator", "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.quantile_accumulator_flush_summary", "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.quantile_accumulator_deserialize", "tensorflow.python.training.saver.BaseSaverBuilder.SaveSpec", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.framework.ops.name_scope", "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.make_quantile_summaries", "tensorflow.contrib.boosted_trees.python.ops.batch_ops_utils.ScheduledStampedResourceOp", "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.quantile_accumulator_serialize", "tensorflow.python.ops.resources.register_resource", "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.quantile_accumulator_flush", "tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops.quantile_accumulator_get_buckets" ], [ "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "tensorflow.python.keras.layers.Embedding", "tensorflow.python.keras.layers.Dense", "tensorflow.core.protobuf.config_pb2.GraphOptions", "tensorflow.python.ops.array_ops.zeros", "numpy.random.randn", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.ops.gen_math_ops.less", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.keras.layers.recurrent_v2.GRU", "numpy.random.randint", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.keras.regularizers.l1", "tensorflow.python.platform.test.main", "tensorflow.python.framework.test_util.device", "tensorflow.python.ops.array_ops.expand_dims", 
"tensorflow.python.keras.keras_parameterized.run_all_keras_modes", "tensorflow.python.keras.utils.to_categorical", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.keras.layers.Masking", "tensorflow.python.keras.testing_utils.should_run_tf_function", "tensorflow.python.ops.math_ops.square", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.keras.models.Sequential", "tensorflow.python.keras.testing_utils.layer_test", "numpy.testing.assert_allclose", "tensorflow.python.keras.layers.Input", "tensorflow.python.keras.constraints.max_norm", "tensorflow.python.training.gradient_descent.GradientDescentOptimizer", "tensorflow.python.keras.testing_utils.should_run_eagerly", "tensorflow.python.keras.testing_utils.get_test_data", "tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig", "numpy.random.random", "numpy.ones", "tensorflow.python.keras.models.Model", "tensorflow.python.keras.layers.recurrent_v2._runtime", "tensorflow.python.keras.layers.recurrent.GRU", "numpy.random.uniform", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.init_ops.zeros_initializer" ], [ "tensorflow.python.ops.math_ops.greater_equal", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.ensure_shape", "tensorflow.python.ops.array_ops.strided_slice", "tensorflow.python.ops.math_ops.greater", "tensorflow.python.util.deprecation.deprecated_endpoints", "tensorflow.python.ops.math_ops.not_equal", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.control_flow_ops.Assert", "tensorflow.python.ops.math_ops.logical_not", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.math_ops.reduce_all", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.array_ops.where", "tensorflow.python.util.tf_export.tf_export", "numpy.finfo", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.array_ops.size", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.math_ops.equal", "numpy.equal", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.math_ops.less_equal", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.boolean_mask", "tensorflow.python.framework.errors.InvalidArgumentError" ], [ "tensorflow.python.framework.sparse_tensor.SparseTensorValue", "numpy.ndarray", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.platform.googletest.main", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.composite_tensor_utils.append_composite_tensor", "numpy.array" ], [ "tensorflow.python.util.tf_inspect.getmembers" ], [ "tensorflow.python.data.util.options.create_option", "tensorflow.python.util.tf_export.tf_export" ], [ "tensorflow.contrib.hadoop.python.ops.gen_dataset_ops.sequence_file_dataset", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.framework.ops.convert_to_tensor", 
"tensorflow.python.util.deprecation.deprecated" ], [ "tensorflow.python.framework.ops.name_scope", "tensorflow.python.eager.function.defun", "tensorflow.python.util.tf_export.tf_export" ], [ "numpy.pad", "numpy.arange", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.test.main", "numpy.prod", "tensorflow.python.framework.ops.device" ], [ "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.platform.test.main" ], [ "numpy.expand_dims", "tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model", "numpy.random.random_sample", "tensorflow.lite.python.lite.TFLiteConverter.from_keras_model_file", "tensorflow.lite.python.convert_saved_model.get_inputs_outputs", "tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model", "tensorflow.python.platform.resource_loader.get_root_dir_with_all_resources", "numpy.testing.assert_almost_equal", "tensorflow.python.keras.preprocessing.image.load_img", "tensorflow.lite.python.lite.TFLiteConverter.from_saved_model", "tensorflow.python.saved_model.loader.load", "tensorflow.lite.python.convert_saved_model.get_signature_def", "tensorflow.lite.python.lite.Interpreter", "tensorflow.python.platform.resource_loader.get_path_to_datafile", "tensorflow.python.framework.importer.import_graph_def", "tensorflow.python.keras.models.load_model", "tensorflow.lite.python.lite.TFLiteConverter.from_frozen_graph", "tensorflow.python.client.session.Session", "tensorflow.python.saved_model.load.load", "tensorflow.lite.python.util.get_tensors_from_tensor_names", "numpy.random.seed", "tensorflow.python.lib.io.file_io.FileIO", "tensorflow.python.framework.ops.Graph", "tensorflow.python.keras.preprocessing.image.img_to_array", "tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions", "tensorflow.core.framework.graph_pb2.GraphDef", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator", "tensorflow.python.training.experimental.loss_scale.FixedLossScale", "tensorflow.python.distribute.distribution_strategy_context.has_strategy", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.training.experimental.loss_scale.DynamicLossScale", "numpy.finfo", "tensorflow.python.platform.test.main", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.ops.check_ops.assert_equal", "tensorflow.python.distribute.distribution_strategy_context.get_strategy", "tensorflow.python.distribute.mirrored_strategy.MirroredStrategy", "tensorflow.python.distribute.distribution_strategy_context.get_replica_context", "tensorflow.python.training.experimental.loss_scale.DynamicLossScale.from_config", "tensorflow.python.eager.context.num_gpus", "tensorflow.python.training.experimental.loss_scale.FixedLossScale.from_config", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.training.experimental.loss_scale.get", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.distribute.device_util.local_devices_from_num_gpus", "tensorflow.python.distribute.parameter_server_strategy.ParameterServerStrategyExtended", "tensorflow.python.util.tf_export.tf_export" ], [ "tensorflow.python.ops.array_ops.fake_quant_with_min_max_args", "tensorflow.python.ops.array_ops.fake_quant_with_min_max_vars_gradient", 
"tensorflow.python.ops.gen_array_ops.fake_quant_with_min_max_args_gradient", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.googletest.main", "numpy.array", "tensorflow.python.ops.array_ops.fake_quant_with_min_max_vars" ], [ "numpy.asarray", "numpy.array", "tensorflow.core.protobuf.tpu.topology_pb2.TopologyProto" ], [ "numpy.get_printoptions", "tensorflow.core.framework.summary_pb2.Summary.Value", "tensorflow.python.platform.tf_logging.error", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.client.timeline.Timeline", "tensorflow.python.training.summary_io.SummaryWriterCache.get", "tensorflow.python.training.training_util.get_global_step", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.platform.tf_logging.log_first_n", "tensorflow.python.ops.init_ops.ones_initializer", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.platform.tf_logging.warning", "numpy.isnan", "tensorflow.python.training.training_util._get_or_create_global_step_read", "tensorflow.core.util.event_pb2.SessionLog", "numpy.set_printoptions", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.training.session_run_hook.SessionRunArgs", "tensorflow.python.platform.gfile.Open" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.distribute.cross_device_utils.contains_indexed_slices", "tensorflow.python.distribute.values.ReplicaDeviceMap", "tensorflow.python.eager.test.main", "tensorflow.python.distribute.combinations.combine", "tensorflow.python.distribute.cross_device_utils.aggregate_tensors_or_indexed_slices", "tensorflow.python.distribute.device_util.resolve", "tensorflow.python.distribute.values.PerReplica", "tensorflow.python.distribute.cross_device_utils.copy_tensor_or_indexed_slices_to_device", "tensorflow.python.distribute.cross_device_utils.divide_by_n_tensors_or_indexed_slices", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.device", "tensorflow.python.framework.constant_op.constant" ], [ "matplotlib.pyplot.legend", "tensorflow.concat", "tensorflow.contrib.timeseries.CSVReader", "tensorflow.zeros", "numpy.squeeze", "numpy.concatenate", "matplotlib.pyplot.plot", "tensorflow.make_template", "tensorflow.compat.v1.app.run", "tensorflow.feature_column.embedding_column", "tensorflow.Graph", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.contrib.timeseries.saved_model_utils.predict_continuation", "tensorflow.squeeze", "numpy.zeros", "tensorflow.contrib.timeseries.predict_continuation_input_fn", "tensorflow.train.MonitoredSession", "tensorflow.contrib.timeseries.WholeDatasetInputFn", "tensorflow.feature_column.categorical_column_with_hash_bucket", "tensorflow.assert_equal", "tensorflow.feature_column.numeric_column", "tensorflow.contrib.timeseries.RandomWindowInputFn", "matplotlib.pyplot.show", "numpy.array", "tensorflow.contrib.timeseries.saved_model_utils.cold_start_filter", "tensorflow.contrib.timeseries.python.timeseries.state_management.ChainingStateManager", "matplotlib.pyplot.axvline", "tensorflow.reduce_mean", "matplotlib.use", "tensorflow.nn.rnn_cell.LSTMCell", "numpy.ones", "tensorflow.compat.v1.Session", "tensorflow.variable_scope", "tensorflow.saved_model.loader.load" ], [ "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.eager.test.main", 
"tensorflow.python.framework.ops.Graph", "tensorflow.python.distribute.combinations.combine", "tensorflow.python.data.ops.dataset_ops.Dataset.range" ], [ "tensorflow.device", "tensorflow.enable_eager_execution", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.DeviceSpec.from_string", "tensorflow.train.AdamOptimizer", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.test.main", "tensorflow.Session", "tensorflow.contrib.eager.python.examples.revnet.blocks_test.compute_degree", "tensorflow.data.Dataset.from_tensors", "tensorflow.identity", "tensorflow.contrib.eager.python.examples.revnet.revnet.RevNet", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.contrib.eager.python.examples.revnet.config.get_hparams_imagenet_56", "tensorflow.GradientTape", "tensorflow.contrib.eager.python.examples.revnet.config.get_hparams_cifar_38", "tensorflow.constant", "tensorflow.reshape", "tensorflow.test.is_gpu_available", "tensorflow.random_uniform", "tensorflow.random_normal" ], [ "tensorflow.string_to_hash_bucket_fast", "numpy.random.seed", "tensorflow.reduce_any", "tensorflow.equal", "tensorflow.string_split", "tensorflow.data.TextLineDataset" ], [ "tensorflow.python.platform.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.3", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.4", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", 
"2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.3", "2.2", "2.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], 
"tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "1.4", "2.7", "2.2", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.6", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
Adarsh2910/katib
[ "cd095d6a33401cfddee8188943b60cd12c950c33" ]
[ "pkg/suggestion/v1beta1/nas/enas/service.py" ]
[ "import logging\nfrom logging import getLogger, StreamHandler, INFO\nimport json\nimport os\nimport tensorflow as tf\nimport grpc\n\nfrom pkg.apis.manager.v1beta1.python import api_pb2\nfrom pkg.apis.manager.v1beta1.python import api_pb2_grpc\nfrom pkg.suggestion.v1beta1.nas.enas.Controller import Controller\nfrom pkg.suggestion.v1beta1.nas.enas.Operation import SearchSpace\nfrom pkg.suggestion.v1beta1.nas.enas.AlgorithmSettings import (\n parseAlgorithmSettings, algorithmSettingsValidator, enableNoneSettingsList)\nfrom pkg.suggestion.v1beta1.internal.base_health_service import HealthServicer\n\n\nclass EnasExperiment:\n def __init__(self, request, logger):\n self.logger = logger\n self.experiment_name = request.experiment.name\n self.experiment = request.experiment\n self.num_trials = 1\n self.tf_graph = tf.Graph()\n self.ctrl_cache_file = \"ctrl_cache/{}.ckpt\".format(\n self.experiment_name)\n self.suggestion_step = 0\n self.algorithm_settings = None\n self.controller = None\n self.num_layers = None\n self.input_sizes = None\n self.output_sizes = None\n self.num_operations = None\n self.search_space = None\n self.opt_direction = None\n self.objective_name = None\n self.logger.info(\"-\" * 100 + \"\\nSetting Up Suggestion for Experiment {}\\n\".format(\n self.experiment_name) + \"-\" * 100)\n self._get_experiment_param()\n self._setup_controller()\n self.logger.info(\">>> Suggestion for Experiment {} has been initialized.\\n\".format(\n self.experiment_name))\n\n def _get_experiment_param(self):\n # this function need to\n # 1) get the number of layers\n # 2) get the I/O size\n # 3) get the available operations\n # 4) get the optimization direction (i.e. minimize or maximize)\n # 5) get the objective name\n # 6) get the algorithm settings\n\n # Get Search Space\n self.opt_direction = self.experiment.spec.objective.type\n self.objective_name = self.experiment.spec.objective.objective_metric_name\n\n nas_config = self.experiment.spec.nas_config\n\n graph_config = nas_config.graph_config\n self.num_layers = int(graph_config.num_layers)\n self.input_sizes = list(map(int, graph_config.input_sizes))\n self.output_sizes = list(map(int, graph_config.output_sizes))\n\n search_space_raw = nas_config.operations\n search_space_object = SearchSpace(search_space_raw)\n self.search_space = search_space_object.search_space\n self.num_operations = search_space_object.num_operations\n\n self.print_search_space()\n\n # Get Experiment Algorithm Settings\n settings_raw = self.experiment.spec.algorithm.algorithm_settings\n self.algorithm_settings = parseAlgorithmSettings(settings_raw)\n\n self.print_algorithm_settings()\n\n def _setup_controller(self):\n\n with self.tf_graph.as_default():\n\n self.controller = Controller(\n num_layers=self.num_layers,\n num_operations=self.num_operations,\n controller_hidden_size=self.algorithm_settings['controller_hidden_size'],\n controller_temperature=self.algorithm_settings['controller_temperature'],\n controller_tanh_const=self.algorithm_settings['controller_tanh_const'],\n controller_entropy_weight=self.algorithm_settings['controller_entropy_weight'],\n controller_baseline_decay=self.algorithm_settings['controller_baseline_decay'],\n controller_learning_rate=self.algorithm_settings[\"controller_learning_rate\"],\n controller_skip_target=self.algorithm_settings['controller_skip_target'],\n controller_skip_weight=self.algorithm_settings['controller_skip_weight'],\n controller_name=\"Ctrl_\" + self.experiment_name,\n logger=self.logger)\n\n 
self.controller.build_trainer()\n\n def print_search_space(self):\n if self.search_space is None:\n self.logger.warning(\n \"Error! The Suggestion has not yet been initialized!\")\n return\n\n self.logger.info(\n \">>> Search Space for Experiment {}\".format(self.experiment_name))\n for opt in self.search_space:\n opt.print_op(self.logger)\n self.logger.info(\n \"There are {} operations in total.\\n\".format(self.num_operations))\n\n def print_algorithm_settings(self):\n if self.algorithm_settings is None:\n self.logger.warning(\n \"Error! The Suggestion has not yet been initialized!\")\n return\n\n self.logger.info(\">>> Parameters of LSTM Controller for Experiment {}\\n\".format(\n self.experiment_name))\n for spec in self.algorithm_settings:\n if len(spec) > 22:\n self.logger.info(\"{}:\\t{}\".format(\n spec, self.algorithm_settings[spec]))\n else:\n self.logger.info(\"{}:\\t\\t{}\".format(\n spec, self.algorithm_settings[spec]))\n\n self.logger.info(\"\")\n\n\nclass EnasService(api_pb2_grpc.SuggestionServicer, HealthServicer):\n def __init__(self, logger=None):\n super(EnasService, self).__init__()\n self.is_first_run = True\n self.experiment = None\n if logger == None:\n self.logger = getLogger(__name__)\n FORMAT = '%(asctime)-15s Experiment %(experiment_name)s %(message)s'\n logging.basicConfig(format=FORMAT)\n handler = StreamHandler()\n handler.setLevel(INFO)\n self.logger.setLevel(INFO)\n self.logger.addHandler(handler)\n self.logger.propagate = False\n else:\n self.logger = logger\n\n if not os.path.exists(\"ctrl_cache/\"):\n os.makedirs(\"ctrl_cache/\")\n\n def ValidateAlgorithmSettings(self, request, context):\n self.logger.info(\"Validate Algorithm Settings start\")\n graph_config = request.experiment.spec.nas_config.graph_config\n\n # Validate GraphConfig\n # Check InputSize\n if not graph_config.input_sizes:\n return self.SetValidateContextError(context, \"Missing InputSizes in GraphConfig:\\n{}\".format(graph_config))\n\n # Check OutputSize\n if not graph_config.output_sizes:\n return self.SetValidateContextError(context, \"Missing OutputSizes in GraphConfig:\\n{}\".format(graph_config))\n\n # Check NumLayers\n if not graph_config.num_layers:\n return self.SetValidateContextError(context, \"Missing NumLayers in GraphConfig:\\n{}\".format(graph_config))\n\n # Validate each operation\n operations_list = list(\n request.experiment.spec.nas_config.operations.operation)\n for operation in operations_list:\n\n # Check OperationType\n if not operation.operation_type:\n return self.SetValidateContextError(context, \"Missing operationType in Operation:\\n{}\".format(operation))\n\n # Check ParameterConfigs\n if not operation.parameter_specs.parameters:\n return self.SetValidateContextError(context, \"Missing ParameterConfigs in Operation:\\n{}\".format(operation))\n\n # Validate each ParameterConfig in Operation\n parameters_list = list(operation.parameter_specs.parameters)\n for parameter in parameters_list:\n\n # Check Name\n if not parameter.name:\n return self.SetValidateContextError(context, \"Missing Name in ParameterConfig:\\n{}\".format(parameter))\n\n # Check ParameterType\n if not parameter.parameter_type:\n return self.SetValidateContextError(context, \"Missing ParameterType in ParameterConfig:\\n{}\".format(parameter))\n\n # Check List in Categorical or Discrete Type\n if parameter.parameter_type == api_pb2.CATEGORICAL or parameter.parameter_type == api_pb2.DISCRETE:\n if not parameter.feasible_space.list:\n return self.SetValidateContextError(context, \"Missing List in 
ParameterConfig.feasibleSpace:\\n{}\".format(parameter))\n\n # Check Max, Min, Step in Int or Double Type\n elif parameter.parameter_type == api_pb2.INT or parameter.parameter_type == api_pb2.DOUBLE:\n if not parameter.feasible_space.min and not parameter.feasible_space.max:\n return self.SetValidateContextError(context, \"Missing Max and Min in ParameterConfig.feasibleSpace:\\n{}\".format(parameter))\n\n if parameter.parameter_type == api_pb2.DOUBLE and (not parameter.feasible_space.step or float(parameter.feasible_space.step) <= 0):\n return self.SetValidateContextError(context, \"Step parameter should be > 0 in ParameterConfig.feasibleSpace:\\n{}\".format(parameter))\n\n # Validate Algorithm Settings\n settings_raw = request.experiment.spec.algorithm.algorithm_settings\n for setting in settings_raw:\n if setting.name in algorithmSettingsValidator.keys():\n if setting.name in enableNoneSettingsList and setting.value == \"None\":\n continue\n setting_type = algorithmSettingsValidator[setting.name][0]\n setting_range = algorithmSettingsValidator[setting.name][1]\n try:\n converted_value = setting_type(setting.value)\n except:\n return self.SetValidateContextError(context, \"Algorithm Setting {} must be {} type\".format(setting.name, setting_type.__name__))\n\n if setting_type == float:\n if converted_value <= setting_range[0] or (setting_range[1] != 'inf' and converted_value > setting_range[1]):\n return self.SetValidateContextError(context, \"Algorithm Setting {}: {} with {} type must be in range ({}, {}]\".format(\n setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]\n ))\n\n elif converted_value < setting_range[0]:\n return self.SetValidateContextError(context, \"Algorithm Setting {}: {} with {} type must be in range [{}, {})\".format(\n setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]\n ))\n else:\n return self.SetValidateContextError(context, \"Unknown Algorithm Setting name: {}\".format(setting.name))\n\n self.logger.info(\"All Experiment Settings are Valid\")\n return api_pb2.ValidateAlgorithmSettingsReply()\n\n def SetValidateContextError(self, context, error_message):\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n context.set_details(error_message)\n self.logger.info(error_message)\n return api_pb2.ValidateAlgorithmSettingsReply()\n\n def GetSuggestions(self, request, context):\n if self.is_first_run:\n self.experiment = EnasExperiment(request, self.logger)\n experiment = self.experiment\n if request.request_number > 0:\n experiment.num_trials = request.request_number\n self.logger.info(\"-\" * 100 + \"\\nSuggestion Step {} for Experiment {}\\n\".format(\n experiment.suggestion_step, experiment.experiment_name) + \"-\" * 100)\n\n self.logger.info(\"\")\n self.logger.info(\">>> RequestNumber:\\t\\t{}\".format(experiment.num_trials))\n self.logger.info(\"\")\n\n with experiment.tf_graph.as_default():\n saver = tf.compat.v1.train.Saver()\n ctrl = experiment.controller\n\n controller_ops = {\n \"loss\": ctrl.loss,\n \"entropy\": ctrl.sample_entropy,\n \"grad_norm\": ctrl.grad_norm,\n \"baseline\": ctrl.baseline,\n \"skip_rate\": ctrl.skip_rate,\n \"train_op\": ctrl.train_op,\n \"train_step\": ctrl.train_step,\n \"sample_arc\": ctrl.sample_arc,\n \"child_val_accuracy\": ctrl.child_val_accuracy,\n }\n\n if self.is_first_run:\n self.logger.info(\">>> First time running suggestion for {}. 
Random architecture will be given.\".format(\n experiment.experiment_name))\n with tf.compat.v1.Session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n candidates = list()\n for _ in range(experiment.num_trials):\n candidates.append(\n sess.run(controller_ops[\"sample_arc\"]))\n\n # TODO: will use PVC to store the checkpoint to protect against unexpected suggestion pod restart\n saver.save(sess, experiment.ctrl_cache_file)\n\n self.is_first_run = False\n\n else:\n with tf.compat.v1.Session() as sess:\n saver.restore(sess, experiment.ctrl_cache_file)\n\n result = self.GetEvaluationResult(request.trials)\n\n # TODO: (andreyvelich) I deleted this part, should it be handle by controller?\n # Sometimes training container may fail and GetEvaluationResult() will return None\n # In this case, the Suggestion will:\n # 1. Firstly try to respawn the previous trials after waiting for RESPAWN_SLEEP seconds\n # 2. If respawning the trials for RESPAWN_LIMIT times still cannot collect valid results,\n # then fail the task because it may indicate that the training container has errors.\n if result is None:\n self.logger.warning(\n \">>> Suggestion has spawned trials, but they all failed.\")\n self.logger.warning(\n \">>> Please check whether the training container is correctly implemented\")\n self.logger.info(\">>> Experiment {} failed\".format(\n experiment.experiment_name))\n return []\n\n # This LSTM network is designed to maximize the metrics\n # However, if the user wants to minimize the metrics, we can take the negative of the result\n\n if experiment.opt_direction == api_pb2.MINIMIZE:\n result = -result\n\n self.logger.info(\">>> Suggestion updated. LSTM Controller Training\\n\")\n log_every = experiment.algorithm_settings[\"controller_log_every_steps\"]\n for ctrl_step in range(1, experiment.algorithm_settings[\"controller_train_steps\"]+1):\n run_ops = [\n controller_ops[\"loss\"],\n controller_ops[\"entropy\"],\n controller_ops[\"grad_norm\"],\n controller_ops[\"baseline\"],\n controller_ops[\"skip_rate\"],\n controller_ops[\"train_op\"]\n ]\n\n loss, entropy, grad_norm, baseline, skip_rate, _ = sess.run(\n fetches=run_ops,\n feed_dict={controller_ops[\"child_val_accuracy\"]: result})\n\n controller_step = sess.run(controller_ops[\"train_step\"])\n if ctrl_step % log_every == 0:\n log_string = \"\"\n log_string += \"Controller Step: {} - \".format(controller_step)\n log_string += \"Loss: {:.4f} - \".format(loss)\n log_string += \"Entropy: {:.9} - \".format(entropy)\n log_string += \"Gradient Norm: {:.7f} - \".format(grad_norm)\n log_string += \"Baseline={:.4f} - \".format(baseline)\n log_string += \"Skip Rate={:.4f}\".format(skip_rate)\n self.logger.info(log_string)\n\n candidates = list()\n for _ in range(experiment.num_trials):\n candidates.append(\n sess.run(controller_ops[\"sample_arc\"]))\n\n saver.save(sess, experiment.ctrl_cache_file)\n\n organized_candidates = list()\n parameter_assignments = list()\n\n for i in range(experiment.num_trials):\n arc = candidates[i].tolist()\n organized_arc = [0 for _ in range(experiment.num_layers)]\n record = 0\n for l in range(experiment.num_layers):\n organized_arc[l] = arc[record: record + l + 1]\n record += l + 1\n organized_candidates.append(organized_arc)\n\n nn_config = dict()\n nn_config['num_layers'] = experiment.num_layers\n nn_config['input_sizes'] = experiment.input_sizes\n nn_config['output_sizes'] = experiment.output_sizes\n nn_config['embedding'] = dict()\n for l in range(experiment.num_layers):\n opt = 
organized_arc[l][0]\n nn_config['embedding'][opt] = experiment.search_space[opt].get_dict()\n\n organized_arc_json = json.dumps(organized_arc)\n nn_config_json = json.dumps(nn_config)\n\n organized_arc_str = str(organized_arc_json).replace('\\\"', '\\'')\n nn_config_str = str(nn_config_json).replace('\\\"', '\\'')\n\n self.logger.info(\n \"\\n>>> New Neural Network Architecture Candidate #{} (internal representation):\".format(i))\n self.logger.info(organized_arc_json)\n self.logger.info(\"\\n>>> Corresponding Seach Space Description:\")\n self.logger.info(nn_config_str)\n\n parameter_assignments.append(\n api_pb2.GetSuggestionsReply.ParameterAssignments(\n assignments=[\n api_pb2.ParameterAssignment(\n name=\"architecture\",\n value=organized_arc_str\n ),\n api_pb2.ParameterAssignment(\n name=\"nn_config\",\n value=nn_config_str\n )\n ]\n )\n )\n\n self.logger.info(\"\")\n self.logger.info(\">>> {} Trials were created for Experiment {}\".format(\n experiment.num_trials, experiment.experiment_name))\n self.logger.info(\"\")\n\n experiment.suggestion_step += 1\n\n return api_pb2.GetSuggestionsReply(parameter_assignments=parameter_assignments)\n\n def GetEvaluationResult(self, trials_list):\n completed_trials = dict()\n failed_trials = []\n for t in trials_list:\n if t.status.condition == api_pb2.TrialStatus.TrialConditionType.SUCCEEDED:\n target_value = None\n for metric in t.status.observation.metrics:\n if metric.name == t.spec.objective.objective_metric_name:\n target_value = metric.value\n break\n\n # Take only the first metric value\n # In current cifar-10 training container this value is the latest\n completed_trials[t.name] = float(target_value)\n\n if t.status.condition == api_pb2.TrialStatus.TrialConditionType.FAILED:\n failed_trials.append(t.name)\n\n n_completed = len(completed_trials)\n self.logger.info(\">>> By now: {} Trials succeeded, {} Trials failed\".format(\n n_completed, len(failed_trials)))\n for tname in completed_trials:\n self.logger.info(\"Trial: {}, Value: {}\".format(\n tname, completed_trials[tname]))\n for tname in failed_trials:\n self.logger.info(\"Trial: {} was failed\".format(tname))\n\n if n_completed > 0:\n avg_metrics = sum(completed_trials.values()) / n_completed\n self.logger.info(\"The average is {}\\n\".format(avg_metrics))\n\n return avg_metrics\n" ]
[ [ "tensorflow.compat.v1.Session", "tensorflow.Graph", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.global_variables_initializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
raimonpv/NeuroKit
[ "cb37d83ee20d6a13a91c4848aa435f41e979e203", "cb37d83ee20d6a13a91c4848aa435f41e979e203" ]
[ "tests/tests_hrv.py", "tests/tests_eeg.py" ]
[ "import numpy as np\n\nimport neurokit2 as nk\n\n\ndef test_hrv_time():\n ecg_slow = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=70, random_state=42)\n ecg_fast = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)\n\n _, peaks_slow = nk.ecg_process(ecg_slow, sampling_rate=1000)\n _, peaks_fast = nk.ecg_process(ecg_fast, sampling_rate=1000)\n\n hrv_slow = nk.hrv_time(peaks_slow, sampling_rate=1000)\n hrv_fast = nk.hrv_time(peaks_fast, sampling_rate=1000)\n\n assert np.all(hrv_fast[\"HRV_RMSSD\"] < hrv_slow[\"HRV_RMSSD\"])\n assert np.all(hrv_fast[\"HRV_MeanNN\"] < hrv_slow[\"HRV_MeanNN\"])\n assert np.all(hrv_fast[\"HRV_SDNN\"] < hrv_slow[\"HRV_SDNN\"])\n assert np.all(hrv_fast[\"HRV_CVNN\"] < hrv_slow[\"HRV_CVNN\"])\n assert np.all(hrv_fast[\"HRV_CVSD\"] < hrv_slow[\"HRV_CVSD\"])\n assert np.all(hrv_fast[\"HRV_MedianNN\"] < hrv_slow[\"HRV_MedianNN\"])\n assert np.all(hrv_fast[\"HRV_MadNN\"] < hrv_slow[\"HRV_MadNN\"])\n assert np.all(hrv_fast[\"HRV_MCVNN\"] < hrv_slow[\"HRV_MCVNN\"])\n assert np.all(hrv_fast[\"HRV_pNN50\"] == hrv_slow[\"HRV_pNN50\"])\n assert np.all(hrv_fast[\"HRV_pNN20\"] < hrv_slow[\"HRV_pNN20\"])\n assert np.all(hrv_fast[\"HRV_TINN\"] < hrv_slow[\"HRV_TINN\"])\n assert np.all(hrv_fast[\"HRV_HTI\"] > hrv_slow[\"HRV_HTI\"])\n\n\ndef test_hrv_frequency():\n # Test frequency domain\n ecg1 = nk.ecg_simulate(duration=60, sampling_rate=2000, heart_rate=70, random_state=42)\n _, peaks1 = nk.ecg_process(ecg1, sampling_rate=2000)\n hrv1 = nk.hrv_frequency(peaks1, sampling_rate=2000)\n\n ecg2 = nk.signal_resample(ecg1, sampling_rate=2000, desired_sampling_rate=500)\n _, peaks2 = nk.ecg_process(ecg2, sampling_rate=500)\n hrv2 = nk.hrv_frequency(peaks2, sampling_rate=500)\n\n assert np.allclose(hrv1[\"HRV_HF\"] - hrv2[\"HRV_HF\"], 0, atol=1.5)\n assert np.isnan(hrv1[\"HRV_LF\"][0])\n assert np.isnan(hrv2[\"HRV_LF\"][0])\n assert np.isnan(hrv1[\"HRV_VLF\"][0])\n assert np.isnan(hrv2[\"HRV_LF\"][0])\n\n\ndef test_hrv():\n\n ecg = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)\n\n _, peaks = nk.ecg_process(ecg, sampling_rate=1000)\n\n ecg_hrv = nk.hrv(peaks, sampling_rate=1000)\n\n columns = ['HRV_RMSSD', 'HRV_MeanNN', 'HRV_SDNN', 'HRV_SDSD', 'HRV_CVNN',\n 'HRV_CVSD', 'HRV_MedianNN', 'HRV_MadNN', 'HRV_MCVNN', 'HRV_IQRNN',\n 'HRV_pNN50', 'HRV_pNN20', 'HRV_TINN', 'HRV_HTI', 'HRV_ULF',\n 'HRV_VLF', 'HRV_LF', 'HRV_HF', 'HRV_VHF', 'HRV_LFHF', 'HRV_LFn',\n 'HRV_HFn', 'HRV_LnHF', 'HRV_SD1', 'HRV_SD2', 'HRV_SD1SD2', 'HRV_S',\n 'HRV_CSI', 'HRV_CVI', 'HRV_CSI_Modified', 'HRV_PIP', 'HRV_IALS',\n 'HRV_PSS', 'HRV_PAS', 'HRV_GI', 'HRV_SI', 'HRV_AI', 'HRV_PI',\n 'HRV_C1d', 'HRV_C1a', 'HRV_SD1d',\n 'HRV_SD1a', 'HRV_C2d',\n 'HRV_C2a', 'HRV_SD2d', 'HRV_SD2a',\n 'HRV_Cd', 'HRV_Ca', 'HRV_SDNNd',\n 'HRV_SDNNa', 'HRV_ApEn', 'HRV_SampEn']\n\n assert all(elem in np.array(ecg_hrv.columns.values, dtype=object) for elem\n in columns)", "import mne\nimport numpy as np\n\nimport neurokit2 as nk\n\n# =============================================================================\n# EEG\n# =============================================================================\n\n\ndef test_eeg_add_channel():\n\n raw = mne.io.read_raw_fif(mne.datasets.sample.data_path() + \"/MEG/sample/sample_audvis_raw.fif\", preload=True)\n\n # len(channel) > len(raw)\n ecg1 = nk.ecg_simulate(length=170000)\n\n # sync_index_raw > sync_index_channel\n raw1 = nk.mne_channel_add(raw.copy(), ecg1, channel_type=\"ecg\", sync_index_raw=100, sync_index_channel=0)\n df1 
= raw1.to_data_frame()\n\n # test if the column of channel is added\n assert len(df1.columns) == 378\n\n # test if the NaN is appended properly to the added channel to account for difference in distance between two signals\n sync_index_raw = 100\n sync_index_channel = 0\n for i in df1[\"Added_Channel\"].head(abs(sync_index_channel - sync_index_raw)):\n assert np.isnan(i)\n assert np.isfinite(df1[\"Added_Channel\"].iloc[abs(sync_index_channel - sync_index_raw)])\n\n # len(channel) < len(raw)\n ecg2 = nk.ecg_simulate(length=166790)\n\n # sync_index_raw < sync_index_channel\n raw2 = nk.mne_channel_add(raw.copy(), ecg2, channel_type=\"ecg\", sync_index_raw=0, sync_index_channel=100)\n df2 = raw2.to_data_frame()\n\n # test if the column of channel is added\n assert len(df2.columns) == 378\n\n # test if the NaN is appended properly to the added channel to account for difference in distance between two signals + difference in length\n sync_index_raw = 0\n sync_index_channel = 100\n for i in df2[\"Added_Channel\"].tail(abs(sync_index_channel - sync_index_raw) + (len(raw) - len(ecg2))):\n assert np.isnan(i)\n assert np.isfinite(\n df2[\"Added_Channel\"].iloc[-abs(sync_index_channel - sync_index_raw) - (len(raw) - len(ecg2)) - 1]\n )\n\n\ndef test_mne_channel_extract():\n\n raw = mne.io.read_raw_fif(mne.datasets.sample.data_path() + \"/MEG/sample/sample_audvis_raw.fif\", preload=True)\n\n # Extract 1 channel\n what = \"EEG 053\"\n\n raw_channel = nk.mne_channel_extract(raw, what)\n assert raw_channel.what == what\n\n # Extract more than 1 channel\n what2 = [\"EEG 053\", \"EEG 054\", \"EEG 055\"]\n\n raw_channel2 = nk.mne_channel_extract(raw, what2)\n assert len(raw_channel2.columns) == 3\n assert all(elem in what2 for elem in np.array(raw_channel2.columns.values, dtype=str))\n\n # Extract a category of channels\n what3 = \"EEG\"\n\n raw_channels = nk.mne_channel_extract(raw, what3)\n assert len(raw_channels.columns) == 60\n\n raw_eeg_names = [x for x in raw.info[\"ch_names\"] if what3 in x]\n assert raw_eeg_names == list(raw_channels.columns.values)\n" ]
[ [ "numpy.all", "numpy.array", "numpy.isnan", "numpy.allclose" ], [ "numpy.isnan", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gumpy-bci/gumpy
[ "c51ee75ddf1eaa58813b493282014da6f31f5591" ]
[ "gumpy/split.py" ]
[ "import sklearn.model_selection\nimport numpy as np\nfrom sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit, cross_val_score, StratifiedKFold\n\n\ndef normal(X, labels, test_size):\n \"\"\"Split a dataset into training and test parts.\n Args:\n X (numpy.ndarray): 2D features matrix \n labels: labels vector \n test_size: size of the split\n \n Returns:\n A 2D CSP features matrix \n \"\"\"\n Y = labels\n X_train, X_test, Y_train, Y_test = \\\n sklearn.model_selection.train_test_split(X, Y,\n test_size=test_size,\n random_state=0)\n return X_train, X_test, Y_train, Y_test\n\n\ndef time_series_split(features, labels, n_splits):\n \"\"\"Split a dataset into n splits.\n\n \"\"\"\n xx = sklearn.model_selection.TimeSeriesSplit(n_splits)\n for train_index, test_index in xx.split(features):\n X_train, X_test = features[train_index], features[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n\n return X_train, X_test, y_train, y_test\n\n\ndef stratified_KFold(features, labels, n_splits):\n\n \"\"\"Stratified K-Folds cross-validator\n Stratification is the process of rearranging the data as to ensure each fold is a good representative of the whole\n and by also keeping the balance of classes\n \"\"\"\n skf = StratifiedKFold(n_splits)\n skf.get_n_splits(features, labels)\n for train_index, test_index in skf.split(features, labels):\n X_train, X_test = features[train_index], features[test_index]\n Y_train, Y_test = labels[train_index], labels[test_index]\n return X_train, X_test, Y_train, Y_test\n\n#Stratified ShuffleSplit cross-validator\ndef stratified_shuffle_Split(features, labels, n_splits,test_size,random_state):\n\n \"\"\"Stratified ShuffleSplit cross-validator\n \"\"\"\n cv = StratifiedShuffleSplit(n_splits, test_size, random_state=random_state)\n for train_index, test_index in cv.split(features,labels):\n X_train = features[train_index]\n X_test = features[test_index]\n Y_train = labels[train_index]\n Y_test = labels[test_index]\n return X_train, X_test, Y_train, Y_test\n\n\n#Random permutation cross-validator\ndef shuffle_Split(features, labels, n_splits,test_size,random_state):\n\n \"\"\"ShuffleSplit: Random permutation cross-validator\n \"\"\"\n cv = ShuffleSplit(n_splits, test_size, random_state=random_state)\n for train_index, test_index in cv.split(features):\n X_train = features[train_index]\n X_test = features[test_index]\n Y_train = labels[train_index]\n Y_test = labels[test_index]\n return X_train, X_test, Y_train, Y_test\n" ]
[ [ "sklearn.model_selection.StratifiedShuffleSplit", "sklearn.model_selection.ShuffleSplit", "sklearn.model_selection.StratifiedKFold" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
segasai/desispec
[ "4786347a8ad44effa4985671423f7ba0129ba6c3", "4786347a8ad44effa4985671423f7ba0129ba6c3" ]
[ "py/desispec/scripts/stdstars.py", "py/desispec/fluxcalibration.py" ]
[ "\n\n\"\"\"\nGet the normalized best template to do flux calibration.\n\"\"\"\n\n#- TODO: refactor algorithmic code into a separate module/function\n\nimport argparse\nimport sys\n\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy import units\nfrom astropy.table import Table\n\nfrom desispec import io\nfrom desispec.fluxcalibration import match_templates,normalize_templates,isStdStar\nfrom desispec.interpolation import resample_flux\nfrom desiutil.log import get_logger\nfrom desispec.parallel import default_nproc\nfrom desispec.io.filters import load_legacy_survey_filter\nfrom desiutil.dust import ext_odonnell,extinction_total_to_selective_ratio\nfrom desispec.fiberbitmasking import get_fiberbitmasked_frame\n\ndef parse(options=None):\n parser = argparse.ArgumentParser(description=\"Fit of standard star spectra in frames.\")\n parser.add_argument('--frames', type = str, default = None, required=True, nargs='*',\n help = 'list of path to DESI frame fits files (needs to be same exposure, spectro)')\n parser.add_argument('--skymodels', type = str, default = None, required=True, nargs='*',\n help = 'list of path to DESI sky model fits files (needs to be same exposure, spectro)')\n parser.add_argument('--fiberflats', type = str, default = None, required=True, nargs='*',\n help = 'list of path to DESI fiberflats fits files (needs to be same exposure, spectro)')\n parser.add_argument('--starmodels', type = str, help = 'path of spectro-photometric stellar spectra fits')\n parser.add_argument('-o','--outfile', type = str, help = 'output file for normalized stdstar model flux')\n parser.add_argument('--ncpu', type = int, default = default_nproc, required = False, help = 'use ncpu for multiprocessing')\n parser.add_argument('--delta-color', type = float, default = 0.2, required = False, help = 'max delta-color for the selection of standard stars (on top of meas. errors)')\n parser.add_argument('--color', type = str, default = \"G-R\", choices=['G-R', 'R-Z'], required = False, help = 'color for selection of standard stars')\n parser.add_argument('--z-max', type = float, default = 0.008, required = False, help = 'max peculiar velocity (blue/red)shift range')\n parser.add_argument('--z-res', type = float, default = 0.00002, required = False, help = 'dz grid resolution')\n parser.add_argument('--template-error', type = float, default = 0.1, required = False, help = 'fractional template error used in chi2 computation (about 0.1 for BOSS b1)')\n parser.add_argument('--maxstdstars', type=int, default=30, \\\n help='Maximum number of stdstars to include')\n\n log = get_logger()\n args = None\n if options is None:\n args = parser.parse_args()\n cmd = ' '.join(sys.argv)\n else:\n args = parser.parse_args(options)\n cmd = 'desi_fit_stdstars ' + ' '.join(options)\n\n log.info('RUNNING {}'.format(cmd))\n\n return args\n\ndef safe_read_key(header,key) :\n value = None\n try :\n value=header[key]\n except KeyError :\n value = None\n pass\n if value is None : # second try\n value=header[key.ljust(8).upper()]\n return value\n\ndef dust_transmission(wave,ebv) :\n Rv = 3.1\n extinction = ext_odonnell(wave,Rv=Rv)\n return 10**(-Rv*extinction*ebv/2.5)\n\ndef main(args) :\n \"\"\" finds the best models of all standard stars in the frame\n and normlize the model flux. 
Output is written to a file and will be called for calibration.\n \"\"\"\n\n log = get_logger()\n\n log.info(\"mag delta %s = %f (for the pre-selection of stellar models)\"%(args.color,args.delta_color))\n log.info('multiprocess parallelizing with {} processes'.format(args.ncpu))\n\n # READ DATA\n ############################################\n # First loop through and group by exposure and spectrograph\n frames_by_expid = {}\n for filename in args.frames :\n log.info(\"reading %s\"%filename)\n frame=io.read_frame(filename)\n expid = safe_read_key(frame.meta,\"EXPID\")\n camera = safe_read_key(frame.meta,\"CAMERA\").strip().lower()\n spec = camera[1]\n uniq_key = (expid,spec)\n if uniq_key in frames_by_expid.keys():\n frames_by_expid[uniq_key][camera] = frame\n else:\n frames_by_expid[uniq_key] = {camera: frame}\n\n frames={}\n flats={}\n skies={}\n\n spectrograph=None\n starfibers=None\n starindices=None\n fibermap=None\n\n # For each unique expid,spec pair, get the logical OR of the FIBERSTATUS for all\n # cameras and then proceed with extracting the frame information\n # once we modify the fibermap FIBERSTATUS\n for (expid,spec),camdict in frames_by_expid.items():\n\n fiberstatus = None\n for frame in camdict.values():\n if fiberstatus is None:\n fiberstatus = frame.fibermap['FIBERSTATUS'].data.copy()\n else:\n fiberstatus |= frame.fibermap['FIBERSTATUS']\n\n for camera,frame in camdict.items():\n frame.fibermap['FIBERSTATUS'] |= fiberstatus\n # Set fibermask flagged spectra to have 0 flux and variance\n frame = get_fiberbitmasked_frame(frame,bitmask='stdstars',ivar_framemask=True)\n frame_fibermap = frame.fibermap\n frame_starindices = np.where(isStdStar(frame_fibermap))[0]\n\n #- Confirm that all fluxes have entries but trust targeting bits\n #- to get basic magnitude range correct\n keep = np.ones(len(frame_starindices), dtype=bool)\n\n for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2?\n keep &= frame_fibermap[colname][frame_starindices] > 10**((22.5-30)/2.5)\n keep &= frame_fibermap[colname][frame_starindices] < 10**((22.5-0)/2.5)\n\n frame_starindices = frame_starindices[keep]\n\n if spectrograph is None :\n spectrograph = frame.spectrograph\n fibermap = frame_fibermap\n starindices=frame_starindices\n starfibers=fibermap[\"FIBER\"][starindices]\n\n elif spectrograph != frame.spectrograph :\n log.error(\"incompatible spectrographs %d != %d\"%(spectrograph,frame.spectrograph))\n raise ValueError(\"incompatible spectrographs %d != %d\"%(spectrograph,frame.spectrograph))\n elif starindices.size != frame_starindices.size or np.sum(starindices!=frame_starindices)>0 :\n log.error(\"incompatible fibermap\")\n raise ValueError(\"incompatible fibermap\")\n\n if not camera in frames :\n frames[camera]=[]\n\n frames[camera].append(frame)\n\n # possibly cleanup memory\n del frames_by_expid\n\n for filename in args.skymodels :\n log.info(\"reading %s\"%filename)\n sky=io.read_sky(filename)\n camera=safe_read_key(sky.header,\"CAMERA\").strip().lower()\n if not camera in skies :\n skies[camera]=[]\n skies[camera].append(sky)\n\n for filename in args.fiberflats :\n log.info(\"reading %s\"%filename)\n flat=io.read_fiberflat(filename)\n camera=safe_read_key(flat.header,\"CAMERA\").strip().lower()\n\n # NEED TO ADD MORE CHECKS\n if camera in flats:\n log.warning(\"cannot handle several flats of same camera (%s), will use only the first one\"%camera)\n #raise ValueError(\"cannot handle several flats of same camera (%s)\"%camera)\n else :\n flats[camera]=flat\n\n\n if starindices.size == 
0 :\n log.error(\"no STD star found in fibermap\")\n raise ValueError(\"no STD star found in fibermap\")\n\n log.info(\"found %d STD stars\"%starindices.size)\n\n # log.warning(\"Not using flux errors for Standard Star fits!\")\n\n # DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA\n ############################################\n # since poping dict, we need to copy keys to iterate over to avoid\n # RuntimeError due to changing dict\n frame_cams = list(frames.keys())\n for cam in frame_cams:\n\n if not cam in skies:\n log.warning(\"Missing sky for %s\"%cam)\n frames.pop(cam)\n continue\n if not cam in flats:\n log.warning(\"Missing flat for %s\"%cam)\n frames.pop(cam)\n continue\n\n flat=flats[cam]\n for frame,sky in zip(frames[cam],skies[cam]) :\n frame.flux = frame.flux[starindices]\n frame.ivar = frame.ivar[starindices]\n frame.ivar *= (frame.mask[starindices] == 0)\n frame.ivar *= (sky.ivar[starindices] != 0)\n frame.ivar *= (sky.mask[starindices] == 0)\n frame.ivar *= (flat.ivar[starindices] != 0)\n frame.ivar *= (flat.mask[starindices] == 0)\n frame.flux *= ( frame.ivar > 0) # just for clean plots\n for star in range(frame.flux.shape[0]) :\n ok=np.where((frame.ivar[star]>0)&(flat.fiberflat[star]!=0))[0]\n if ok.size > 0 :\n frame.flux[star] = frame.flux[star]/flat.fiberflat[star] - sky.flux[star]\n frame.resolution_data = frame.resolution_data[starindices]\n\n nframes=len(frames[cam])\n if nframes>1 :\n # optimal weights for the coaddition = ivar*throughput, not directly ivar,\n # we estimate the relative throughput with median fluxes at this stage\n medflux=np.zeros(nframes)\n for i,frame in enumerate(frames[cam]) :\n if np.sum(frame.ivar>0) == 0 :\n log.error(\"ivar=0 for all std star spectra in frame {}-{:08d}\".format(cam,frame.meta[\"EXPID\"]))\n else :\n medflux[i] = np.median(frame.flux[frame.ivar>0])\n log.debug(\"medflux = {}\".format(medflux))\n medflux *= (medflux>0)\n if np.sum(medflux>0)==0 :\n log.error(\"mean median flux = 0, for all stars in fibers {}\".format(list(frames[cam][0].fibermap[\"FIBER\"][starindices])))\n sys.exit(12)\n mmedflux = np.mean(medflux[medflux>0])\n weights=medflux/mmedflux\n log.info(\"coadding {} exposures in cam {}, w={}\".format(nframes,cam,weights))\n\n sw=np.zeros(frames[cam][0].flux.shape)\n swf=np.zeros(frames[cam][0].flux.shape)\n swr=np.zeros(frames[cam][0].resolution_data.shape)\n\n for i,frame in enumerate(frames[cam]) :\n sw += weights[i]*frame.ivar\n swf += weights[i]*frame.ivar*frame.flux\n swr += weights[i]*frame.ivar[:,None,:]*frame.resolution_data\n coadded_frame = frames[cam][0]\n coadded_frame.ivar = sw\n coadded_frame.flux = swf/(sw+(sw==0))\n coadded_frame.resolution_data = swr/((sw+(sw==0))[:,None,:])\n frames[cam] = [ coadded_frame ]\n\n\n # CHECK S/N\n ############################################\n # for each band in 'brz', record quadratic sum of median S/N across wavelength\n snr=dict()\n for band in ['b','r','z'] :\n snr[band]=np.zeros(starindices.size)\n for cam in frames :\n band=cam[0].lower()\n for frame in frames[cam] :\n msnr = np.median( frame.flux * np.sqrt( frame.ivar ) / np.sqrt(np.gradient(frame.wave)) , axis=1 ) # median SNR per sqrt(A.)\n msnr *= (msnr>0)\n snr[band] = np.sqrt( snr[band]**2 + msnr**2 )\n log.info(\"SNR(B) = {}\".format(snr['b']))\n\n ###############################\n max_number_of_stars = 50\n min_blue_snr = 4.\n ###############################\n indices=np.argsort(snr['b'])[::-1][:max_number_of_stars]\n\n validstars = np.where(snr['b'][indices]>min_blue_snr)[0]\n\n #- TODO: later we filter on 
models based upon color, thus throwing\n #- away very blue stars for which we don't have good models.\n\n log.info(\"Number of stars with median stacked blue S/N > {} /sqrt(A) = {}\".format(min_blue_snr,validstars.size))\n if validstars.size == 0 :\n log.error(\"No valid star\")\n sys.exit(12)\n\n validstars = indices[validstars]\n\n for band in ['b','r','z'] :\n snr[band]=snr[band][validstars]\n\n log.info(\"BLUE SNR of selected stars={}\".format(snr['b']))\n\n for cam in frames :\n for frame in frames[cam] :\n frame.flux = frame.flux[validstars]\n frame.ivar = frame.ivar[validstars]\n frame.resolution_data = frame.resolution_data[validstars]\n starindices = starindices[validstars]\n starfibers = starfibers[validstars]\n nstars = starindices.size\n fibermap = Table(fibermap[starindices])\n\n # MASK OUT THROUGHPUT DIP REGION\n ############################################\n mask_throughput_dip_region = True\n if mask_throughput_dip_region :\n wmin=4300.\n wmax=4500.\n log.warning(\"Masking out the wavelength region [{},{}]A in the standard star fit\".format(wmin,wmax))\n for cam in frames :\n for frame in frames[cam] :\n ii=np.where( (frame.wave>=wmin)&(frame.wave<=wmax) )[0]\n if ii.size>0 :\n frame.ivar[:,ii] = 0\n\n # READ MODELS\n ############################################\n log.info(\"reading star models in %s\"%args.starmodels)\n stdwave,stdflux,templateid,teff,logg,feh=io.read_stdstar_templates(args.starmodels)\n\n # COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG\n ############################################\n\n #- Support older fibermaps\n if 'PHOTSYS' not in fibermap.colnames:\n log.warning('Old fibermap format; using defaults for missing columns')\n log.warning(\" PHOTSYS = 'S'\")\n log.warning(\" EBV = 0.0\")\n fibermap['PHOTSYS'] = 'S'\n fibermap['EBV'] = 0.0\n\n model_filters = dict()\n for band in [\"G\",\"R\",\"Z\"] :\n for photsys in np.unique(fibermap['PHOTSYS']) :\n model_filters[band+photsys] = load_legacy_survey_filter(band=band,photsys=photsys)\n\n log.info(\"computing model mags for %s\"%sorted(model_filters.keys()))\n model_mags = dict()\n fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom\n for filter_name, filter_response in model_filters.items():\n model_mags[filter_name] = filter_response.get_ab_magnitude(stdflux*fluxunits,stdwave)\n log.info(\"done computing model mags\")\n\n # LOOP ON STARS TO FIND BEST MODEL\n ############################################\n linear_coefficients=np.zeros((nstars,stdflux.shape[0]))\n chi2dof=np.zeros((nstars))\n redshift=np.zeros((nstars))\n normflux=[]\n\n star_mags = dict()\n star_unextincted_mags = dict()\n\n photometric_systems = np.unique(fibermap['PHOTSYS'])\n for band in ['G', 'R', 'Z']:\n star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band])\n star_unextincted_mags[band] = np.zeros(star_mags[band].shape)\n for photsys in photometric_systems :\n r_band = extinction_total_to_selective_ratio(band , photsys) # dimensionless\n # r_band = a_band / E(B-V)\n # E(B-V) is a difference of magnitudes (dimensionless)\n # a_band = -2.5*log10(effective dust transmission) , dimensionless\n # effective dust transmission =\n # integral( SED(lambda) * filter_transmission(lambda,band) * milkyway_dust_transmission(lambda,E(B-V)) dlamdba)\n # / integral( SED(lambda) * filter_transmission(lambda,band) dlamdba)\n selection = (fibermap['PHOTSYS'] == photsys)\n a_band = r_band * fibermap['EBV'][selection] # dimensionless\n star_unextincted_mags[band][selection] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band][selection]) 
- a_band\n\n star_colors = dict()\n star_colors['G-R'] = star_mags['G'] - star_mags['R']\n star_colors['R-Z'] = star_mags['R'] - star_mags['Z']\n\n star_unextincted_colors = dict()\n star_unextincted_colors['G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R']\n star_unextincted_colors['R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z']\n\n fitted_model_colors = np.zeros(nstars)\n\n for star in range(nstars) :\n\n log.info(\"finding best model for observed star #%d\"%star)\n\n # np.array of wave,flux,ivar,resol\n wave = {}\n flux = {}\n ivar = {}\n resolution_data = {}\n for camera in frames :\n for i,frame in enumerate(frames[camera]) :\n identifier=\"%s-%d\"%(camera,i)\n wave[identifier]=frame.wave\n flux[identifier]=frame.flux[star]\n ivar[identifier]=frame.ivar[star]\n resolution_data[identifier]=frame.resolution_data[star]\n\n # preselect models based on magnitudes\n photsys=fibermap['PHOTSYS'][star]\n if not args.color in ['G-R','R-Z'] :\n raise ValueError('Unknown color {}'.format(args.color))\n bands=args.color.split(\"-\")\n model_colors = model_mags[bands[0]+photsys] - model_mags[bands[1]+photsys]\n\n color_diff = model_colors - star_unextincted_colors[args.color][star]\n selection = np.abs(color_diff) < args.delta_color\n if np.sum(selection) == 0 :\n log.warning(\"no model in the selected color range for this star\")\n continue\n\n\n # smallest cube in parameter space including this selection (needed for interpolation)\n new_selection = (teff>=np.min(teff[selection]))&(teff<=np.max(teff[selection]))\n new_selection &= (logg>=np.min(logg[selection]))&(logg<=np.max(logg[selection]))\n new_selection &= (feh>=np.min(feh[selection]))&(feh<=np.max(feh[selection]))\n selection = np.where(new_selection)[0]\n\n log.info(\"star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d\"%(\n star, starfibers[star], args.color, star_unextincted_colors[args.color][star],\n selection.size, stdflux.shape[0]))\n\n # Match unextincted standard stars to data\n coefficients, redshift[star], chi2dof[star] = match_templates(\n wave, flux, ivar, resolution_data,\n stdwave, stdflux[selection],\n teff[selection], logg[selection], feh[selection],\n ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res,\n template_error=args.template_error\n )\n\n linear_coefficients[star,selection] = coefficients\n\n log.info('Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}'.format(\n starfibers[star],\n np.inner(teff,linear_coefficients[star]),\n np.inner(logg,linear_coefficients[star]),\n np.inner(feh,linear_coefficients[star]),\n redshift[star],\n chi2dof[star])\n )\n\n # Apply redshift to original spectrum at full resolution\n model=np.zeros(stdwave.size)\n redshifted_stdwave = stdwave*(1+redshift[star])\n for i,c in enumerate(linear_coefficients[star]) :\n if c != 0 :\n model += c*np.interp(stdwave,redshifted_stdwave,stdflux[i])\n\n # Apply dust extinction to the model\n log.info(\"Applying MW dust extinction to star {} with EBV = {}\".format(star,fibermap['EBV'][star]))\n model *= dust_transmission(stdwave, fibermap['EBV'][star])\n\n # Compute final color of dust-extincted model\n photsys=fibermap['PHOTSYS'][star]\n if not args.color in ['G-R','R-Z'] :\n raise ValueError('Unknown color {}'.format(args.color))\n bands=args.color.split(\"-\")\n model_mag1 = model_filters[bands[0]+photsys].get_ab_magnitude(model*fluxunits, stdwave)\n model_mag2 = model_filters[bands[1]+photsys].get_ab_magnitude(model*fluxunits, stdwave)\n fitted_model_colors[star] = 
model_mag1 - model_mag2\n if bands[0]==\"R\" :\n model_magr = model_mag1\n elif bands[1]==\"R\" :\n model_magr = model_mag2\n\n #- TODO: move this back into normalize_templates, at the cost of\n #- recalculating a model magnitude?\n\n # Normalize the best model using reported magnitude\n scalefac=10**((model_magr - star_mags['R'][star])/2.5)\n\n log.info('scaling R mag {:.3f} to {:.3f} using scale {}'.format(model_magr, star_mags['R'][star], scalefac))\n normflux.append(model*scalefac)\n\n # Now write the normalized flux for all best models to a file\n normflux=np.array(normflux)\n\n fitted_stars = np.where(chi2dof != 0)[0]\n if fitted_stars.size == 0 :\n log.error(\"No star has been fit.\")\n sys.exit(12)\n\n data={}\n data['LOGG']=linear_coefficients[fitted_stars,:].dot(logg)\n data['TEFF']= linear_coefficients[fitted_stars,:].dot(teff)\n data['FEH']= linear_coefficients[fitted_stars,:].dot(feh)\n data['CHI2DOF']=chi2dof[fitted_stars]\n data['REDSHIFT']=redshift[fitted_stars]\n data['COEFF']=linear_coefficients[fitted_stars,:]\n data['DATA_%s'%args.color]=star_colors[args.color][fitted_stars]\n data['MODEL_%s'%args.color]=fitted_model_colors[fitted_stars]\n data['BLUE_SNR'] = snr['b'][fitted_stars]\n data['RED_SNR'] = snr['r'][fitted_stars]\n data['NIR_SNR'] = snr['z'][fitted_stars]\n io.write_stdstar_models(args.outfile,normflux,stdwave,starfibers[fitted_stars],data)\n", "\"\"\"\ndesispec.fluxcalibration\n========================\n\nFlux calibration routines.\n\"\"\"\nfrom __future__ import absolute_import\nimport numpy as np\nfrom .resolution import Resolution\nfrom .linalg import cholesky_solve, cholesky_solve_and_invert, spline_fit\nfrom .interpolation import resample_flux\nfrom desiutil.log import get_logger\nfrom .io.filters import load_legacy_survey_filter\nfrom desispec import util\nfrom desispec.frame import Frame\nfrom desitarget.targets import main_cmx_or_sv\nimport scipy, scipy.sparse, scipy.ndimage\nimport sys\nimport time\nfrom astropy import units\nimport multiprocessing\nfrom pkg_resources import resource_exists, resource_filename\nimport numpy.linalg\nimport copy\n\ntry:\n from scipy import constants\n C_LIGHT = constants.c/1000.0\nexcept TypeError: # This can happen during documentation builds.\n C_LIGHT = 299792458.0/1000.0\n\ndef isStdStar(fibermap, bright=None):\n \"\"\"\n Determines if target(s) are standard stars\n\n Args:\n fibermap: table including DESI_TARGET or SV1_DESI_TARGET bit mask(s)\n\n Optional:\n bright: if True, only bright time standards; if False, only darktime, otherwise both\n\n Returns bool or array of bool\n \"\"\"\n log = get_logger()\n target_colnames, target_masks, survey = main_cmx_or_sv(fibermap)\n desi_target = fibermap[target_colnames[0]] # (SV1_)DESI_TARGET\n mws_target = fibermap[target_colnames[2]] # (SV1_)MWS_TARGET\n desi_mask = target_masks[0] # (sv1_)desi_mask\n mws_mask = target_masks[2] # (sv1_)mws_mask\n\n # mapping of which stdstar bits to use depending upon `bright` input\n # NOTE: STD_WD and GAIA_STD_WD not yet included in stdstar fitting\n desiDict ={\n None:['STD_FAINT','STD_BRIGHT', 'SV0_STD_FAINT', 'SV0_STD_BRIGHT'],\n True: ['STD_BRIGHT', 'SV0_STD_BRIGHT'],\n False: ['STD_FAINT', 'SV0_STD_FAINT']\n }\n mwsDict ={\n None:['GAIA_STD_FAINT','GAIA_STD_BRIGHT'],\n True:['GAIA_STD_BRIGHT'],\n False:['GAIA_STD_FAINT'],\n }\n\n yes = np.zeros_like(desi_target, dtype=bool)\n for k in desiDict[bright]:\n if k in desi_mask.names():\n yes = yes | ((desi_target & desi_mask[k])!=0)\n yes_mws = np.zeros_like(desi_target, dtype=bool)\n 
for k in mwsDict[bright]:\n if k in mws_mask.names():\n yes_mws |= ((mws_target & mws_mask[k])!=0)\n yes = yes | yes_mws\n\n #- Hack for data on 20201214 where some fiberassign files had targeting\n #- bits set to 0, but column FA_TYPE was still ok\n #- Hardcode mask to avoid fiberassign dependency loop\n FA_STDSTAR_MASK = 2 # fiberassing.targets.TARGET_TYPE_STANDARD\n if np.count_nonzero(yes) == 0:\n log.error(f'No standard stars found in {target_colnames[0]} or {target_colnames[2]}')\n if 'FA_TYPE' in fibermap.dtype.names and \\\n np.sum((fibermap['FA_TYPE'] & FA_STDSTAR_MASK) != 0) > 0:\n log.warning('Using FA_TYPE to find standard stars instead')\n yes = (fibermap['FA_TYPE'] & FA_STDSTAR_MASK) != 0\n\n return yes\n\ndef applySmoothingFilter(flux,width=200) :\n \"\"\" Return a smoothed version of the input flux array using a median filter\n\n Args:\n flux : 1D array of flux\n width : size of the median filter box\n\n Returns:\n smooth_flux : median filtered flux of same size as input\n \"\"\"\n\n # it was checked that the width of the median_filter has little impact on best fit stars\n # smoothing the ouput (with a spline for instance) does not improve the fit\n return scipy.ndimage.filters.median_filter(flux,width,mode='constant')\n#\n# Import some global constants.\n#\n# Why not use astropy constants?\n#\n# This is VERY inconvenient when trying to build documentation!\n# The documentation may be build in an environment that does not have\n# scipy installed. There is no obvious reason why this has to be a module-level\n# calculation.\n#\nimport scipy.constants as const\nh=const.h\npi=const.pi\ne=const.e\nc=const.c\nerg=const.erg\ntry:\n hc = const.h/const.erg*const.c*1.e10 # (in units of ergsA)\nexcept TypeError:\n hc = 1.9864458241717586e-08\n\ndef resample_template(data_wave_per_camera,resolution_data_per_camera,template_wave,template_flux,template_id) :\n \"\"\"Resample a spectral template on the data wavelength grid. Then convolve the spectra by the resolution\n for each camera. Also returns the result of applySmoothingFilter. 
This routine is used internally in\n a call to multiprocessing.Pool.\n\n Args:\n data_wave_per_camera : A dictionary of 1D array of vacuum wavelengths [Angstroms], one entry per camera and exposure.\n resolution_data_per_camera : A dictionary of resolution corresponding for the fiber, one entry per camera and exposure.\n template_wave : 1D array, input spectral template wavelength [Angstroms] (arbitrary spacing).\n template_flux : 1D array, input spectral template flux density.\n template_id : int, template identification index, used to ensure matching of input/output after a multiprocessing run.\n\n Returns:\n template_id : int, template identification index, same as input.\n output_wave : A dictionary of 1D array of vacuum wavelengths\n output_flux : A dictionary of 1D array of output template flux\n output_norm : A dictionary of 1D array of output template smoothed flux\n \"\"\"\n output_wave=np.array([])\n output_flux=np.array([])\n output_norm=np.array([])\n sorted_keys = list(data_wave_per_camera.keys())\n sorted_keys.sort() # force sorting the keys to agree with data (found unpredictable ordering in tests)\n for cam in sorted_keys :\n flux1=resample_flux(data_wave_per_camera[cam],template_wave,template_flux) # this is slow\n flux2=Resolution(resolution_data_per_camera[cam]).dot(flux1) # this is slow\n norme=applySmoothingFilter(flux2) # this is fast\n flux3=flux2/(norme+(norme==0))\n output_flux = np.append(output_flux,flux3)\n output_norm = np.append(output_norm,norme)\n output_wave = np.append(output_wave,data_wave_per_camera[cam]) # need to add wave to avoid wave/flux matching errors\n return template_id,output_wave,output_flux,output_norm\n\n\ndef _func(arg) :\n \"\"\" Used for multiprocessing.Pool \"\"\"\n return resample_template(**arg)\n\ndef _smooth_template(template_id,camera_index,template_flux) :\n \"\"\" Used for multiprocessing.Pool \"\"\"\n norme = applySmoothingFilter(template_flux)\n return template_id,camera_index,norme\n\ndef _func2(arg) :\n \"\"\" Used for multiprocessing.Pool \"\"\"\n return _smooth_template(**arg)\n\ndef redshift_fit(wave, flux, ivar, resolution_data, stdwave, stdflux, z_max=0.005, z_res=0.00005, template_error=0.):\n \"\"\" Redshift fit of a single template\n\n Args:\n wave : A dictionary of 1D array of vacuum wavelengths [Angstroms]. Example below.\n flux : A dictionary of 1D observed flux for the star\n ivar : A dictionary 1D inverse variance of flux\n resolution_data: resolution corresponding to the star's fiber\n stdwave : 1D standard star template wavelengths [Angstroms]\n stdflux : 1D[nwave] template flux\n z_max : float, maximum blueshift and redshift in scan, has to be positive\n z_res : float, step of of redshift scan between [-z_max,+z_max]\n template_error : float, assumed template flux relative error\n\n Returns:\n redshift : redshift of standard star\n\n\n Notes:\n - wave and stdwave can be on different grids that don't\n necessarily overlap\n - wave does not have to be uniform or monotonic. 
Multiple cameras\n can be supported by concatenating their wave and flux arrays\n \"\"\"\n cameras = list(flux.keys())\n log = get_logger()\n log.debug(time.asctime())\n\n # resampling on a log wavelength grid\n #####################################\n # need to go fast so we resample both data and model on a log grid\n\n # define grid\n minwave = 100000.\n maxwave = 0.\n for cam in cameras :\n minwave=min(minwave,np.min(wave[cam]))\n maxwave=max(maxwave,np.max(wave[cam]))\n # ala boss\n lstep=np.log10(1+z_res)\n margin=int(np.log10(1+z_max)/lstep)+1\n minlwave=np.log10(minwave)\n maxlwave=np.log10(maxwave) # desired, but readjusted\n nstep=(maxlwave-minlwave)/lstep\n\n resampled_lwave=minlwave+lstep*np.arange(nstep)\n resampled_wave=10**resampled_lwave\n\n # map data on grid\n resampled_data={}\n resampled_ivar={}\n resampled_model={}\n for cam in cameras :\n tmp_flux,tmp_ivar=resample_flux(resampled_wave,wave[cam],flux[cam],ivar[cam])\n resampled_data[cam]=tmp_flux\n resampled_ivar[cam]=tmp_ivar\n\n # we need to have the model on a larger grid than the data wave for redshifting\n dwave=wave[cam][-1]-wave[cam][-2]\n npix=int((wave[cam][-1]*z_max)/dwave+2)\n extended_cam_wave=np.append( wave[cam][0]+dwave*np.arange(-npix,0) , wave[cam])\n extended_cam_wave=np.append( extended_cam_wave, wave[cam][-1]+dwave*np.arange(1,npix+1))\n # ok now we also need to increase the resolution\n tmp_res=np.zeros((resolution_data[cam].shape[0],resolution_data[cam].shape[1]+2*npix))\n tmp_res[:,:npix] = np.tile(resolution_data[cam][:,0],(npix,1)).T\n tmp_res[:,npix:-npix] = resolution_data[cam]\n tmp_res[:,-npix:] = np.tile(resolution_data[cam][:,-1],(npix,1)).T\n # resampled model at camera resolution, with margin\n tmp=resample_flux(extended_cam_wave,stdwave,stdflux)\n tmp=Resolution(tmp_res).dot(tmp)\n # map on log lam grid\n resampled_model[cam]=resample_flux(resampled_wave,extended_cam_wave,tmp)\n\n # we now normalize both model and data\n tmp=applySmoothingFilter(resampled_data[cam])\n resampled_data[cam]/=(tmp+(tmp==0))\n resampled_ivar[cam]*=tmp**2\n\n if template_error>0 :\n ok=np.where(resampled_ivar[cam]>0)[0]\n if ok.size > 0 :\n resampled_ivar[cam][ok] = 1./ ( 1/resampled_ivar[cam][ok] + template_error**2 )\n\n tmp=applySmoothingFilter(resampled_model[cam])\n resampled_model[cam]/=(tmp+(tmp==0))\n resampled_ivar[cam]*=(tmp!=0)\n\n # fit the best redshift\n chi2=np.zeros((2*margin+1))\n ndata=np.zeros((2*margin+1))\n for i in range(-margin,margin+1) :\n for cam in cameras :\n ndata[i+margin] += np.sum(resampled_ivar[cam][margin:-margin]>0)\n if i<margin :\n chi2[i+margin] += np.sum(resampled_ivar[cam][margin:-margin]*(resampled_data[cam][margin:-margin]-resampled_model[cam][margin+i:-margin+i])**2)\n else :\n chi2[i+margin] += np.sum(resampled_ivar[cam][margin:-margin]*(resampled_data[cam][margin:-margin]-resampled_model[cam][margin+i:])**2)\n\n i=np.argmin(chi2)-margin\n z=10**(-i*lstep)-1\n log.debug(\"Best z=%f\"%z)\n '''\n log.debug(\"i=%d\"%i)\n log.debug(\"lstep=%f\"%lstep)\n log.debug(\"margin=%d\"%margin)\n plt.figure()\n #plt.plot(chi2)\n for cam in cameras :\n ok=np.where(resampled_ivar[cam]>0)[0]\n #plt.plot(resampled_wave[ok],resampled_data[cam][ok],\"o\",c=\"gray\")\n plt.errorbar(resampled_wave[ok],resampled_data[cam][ok],1./np.sqrt(resampled_ivar[cam][ok]),fmt=\"o\",color=\"gray\")\n plt.plot(resampled_wave[margin:-margin],resampled_model[cam][margin+i:-margin+i],\"-\",c=\"r\")\n plt.show()\n '''\n return z\n\n\ndef _compute_coef(coord,node_coords) :\n \"\"\" Function used by 
interpolate_on_parameter_grid2\n\n Args:\n coord : 1D array of coordinates of size n_axis\n node_coords : 2D array of coordinates of nodes, shape = (n_nodes,n_axis)\n\n Returns:\n coef : 1D array of linear coefficients for each node, size = n_nodes\n \"\"\"\n\n n_nodes=node_coords.shape[0]\n npar=node_coords.shape[1]\n coef=np.ones(n_nodes)\n for s in range(n_nodes) :\n coef[s]=1.\n for a in range(npar) :\n dist=np.abs(node_coords[s,a]-coord[a]) # distance between model point and node along axis a\n\n # piece-wise linear version\n if dist>1 :\n coef[s]=0.\n break\n coef[s] *= (1.-dist)\n\n # we could alternatively have used b-spline of higher order\n\n norme=np.sum(coef)\n if norme<=0 : # we are outside of valid grid\n return np.zeros(coef.shape) # will be detected in fitter\n coef /= norme\n return coef\n\n\ndef interpolate_on_parameter_grid(data_wave, data_flux, data_ivar, template_flux, teff, logg, feh, template_chi2) :\n \"\"\" 3D Interpolation routine among templates based on a grid of parameters teff, logg, feh.\n The tricky part is to define a cube on the parameter grid populated with templates, and it is not always possible.\n The routine never extrapolates, so that we stay in the range of input parameters.\n\n Args:\n data_wave : 1D[nwave] array of wavelength (concatenated list of input wavelength of different cameras and exposures)\n data_flux : 1D[nwave] array of normalized flux = (input flux)/median_filter(input flux) (concatenated list)\n data_ivar : 1D[nwave] array of inverse variance of normalized flux\n template_flux : 2D[ntemplates,nwave] array of normalized flux of templates (after resample, convolution and division by median_filter)\n teff : 1D[ntemplates]\n logg : 1D[ntemplates]\n feh : 1D[ntemplates]\n template_chi2 : 1D[ntemplatess] array of precomputed chi2 = sum(data_ivar*(data_flux-template_flux)**2)\n\n Returns:\n coefficients : best fit coefficient of linear combination of templates\n chi2 : chi2 of the linear combination\n \"\"\"\n\n log = get_logger()\n log.debug(\"starting interpolation on grid\")\n\n best_model_id = np.argmin(template_chi2)\n ndata=np.sum(data_ivar>0)\n\n log.debug(\"best model id=%d chi2/ndata=%f teff=%d logg=%2.1f feh=%2.1f\"%(best_model_id,template_chi2[best_model_id]/ndata,teff[best_model_id],logg[best_model_id],feh[best_model_id]))\n\n ntemplates=template_flux.shape[0]\n\n log_linear = False # if True , model = exp( sum_i a_i * log(template_flux_i) ), else model = sum_i a_i * template_flux_i\n\n # physical parameters define axes\n npar=3\n param=np.zeros((npar,ntemplates))\n param[0]=teff\n param[1]=logg\n param[2]=feh\n\n # grid nodes coordinates (unique values of the parameters)\n uparam=[]\n for a in range(npar) :\n uparam.append(np.unique(param[a]))\n #for a in range(npar) :\n # log.debug(\"param %d : %s\"%(a,str(uparam[a])))\n\n\n node_grid_coords=np.zeros((npar,3)).astype(int)\n for a in range(npar) : # a is an axis\n # this is the coordinate on axis 'a' of the best node\n i=np.where(uparam[a]==param[a,best_model_id])[0][0]\n node_grid_coords[a]=np.array([i-1,i,i+1])\n log.debug(\"node_grid_coords[%d]=%s\"%(a,node_grid_coords[a]))\n\n # we don't always have a template on all nodes\n node_template_ids=[]\n node_cube_coords=[]\n for i0,j0 in zip(node_grid_coords[0],[-1,0,1]) :\n for i1,j1 in zip(node_grid_coords[1],[-1,0,1]) :\n for i2,j2 in zip(node_grid_coords[2],[-1,0,1]) :\n\n # check whether coord is in grid\n in_grid = (i0>=0)&(i0<uparam[0].size)&(i1>=0)&(i1<uparam[1].size)&(i2>=0)&(i2<uparam[2].size)\n if not in_grid :\n 
continue\n # check whether there is a template on this node\n selection=np.where((param[0]==uparam[0][i0])&(param[1]==uparam[1][i1])&(param[2]==uparam[2][i2]))[0]\n if selection.size == 0 : # no template on node\n log.debug(\"not template for params = %f,%f,%f\"%(uparam[0][i0],uparam[1][i1],uparam[2][i2]))\n continue\n # we have one\n node_cube_coords.append([j0,j1,j2])\n node_template_ids.append(selection[0])\n node_template_ids=np.array(node_template_ids).astype(int)\n node_cube_coords=np.array(node_cube_coords).astype(int)\n\n # the parameters of the fit are npar coordinates in the range [-1,1] centered on best fit node\n coord=np.zeros(npar)\n\n n_templates = node_template_ids.size\n\n # we are done with the indexing and choice of template nodes\n node_template_flux = template_flux[node_template_ids]\n\n # compute all weighted scalar products among templates (only works if linear combination, not the log version)\n HB=np.zeros(n_templates)\n HA=np.zeros((n_templates,n_templates))\n for t in range(n_templates) :\n HB[t] = np.sum(data_ivar*data_flux*node_template_flux[t])\n for t2 in range(n_templates) :\n if HA[t2,t] != 0 :\n HA[t,t2] = HA[t2,t]\n else :\n HA[t,t2] = np.sum(data_ivar*node_template_flux[t]*node_template_flux[t2])\n\n chi2_0 = np.sum(data_ivar*data_flux**2)\n\n # chi2 = np.sum(data_ivar*(data_flux-model)**2)\n # = chi2_0 - 2*np.sum(data_ivar*data_flux*model) + np.sum(data_ivar*model**2)\n # model = sum_i coef_i model_i\n # chi2 = chi2_0 - 2* sum_i coef_i * HB[i] + sum_ij coef_i * coef_j * HA[i,j]\n # chi2 = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n\n\n # initial state\n coef = _compute_coef(coord,node_cube_coords)\n chi2 = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n log.debug(\"init coord=%s chi2/ndata=%f\"%(coord,chi2/ndata))\n\n # now we have to do the fit\n # fitting one axis at a time (simultaneous fit of 3 axes was tested and found inefficient : rapidly stuck on edges)\n # it has to be iterative because the model is a non-linear combination of parameters w, ex: w[0]*(1-w[1])*(1-w[2])\n for loop in range(50) :\n\n previous_chi2=chi2.copy()\n previous_coord=coord.copy()\n\n for a in range(npar) :\n previous_chi2_a=chi2.copy()\n\n # it's a linear combination of templates, but the model is non-linear function of coordinates\n # so there is no gain in trying to fit robustly with Gauss-Newton, we simply do a scan\n # it is converging rapidely (need however to iterate on axes)\n xcoord=coord.copy()\n xx=np.linspace(-1,1,41) # keep points on nodes , 41 is the resolution, 0.05 of node inter-distance\n chi2=np.zeros(xx.shape)\n for i,x in enumerate(xx) :\n xcoord[a]=x\n coef = _compute_coef(xcoord,node_cube_coords)\n if np.sum(coef)==0 : # outside valid range\n chi2[i]=1e20\n else :\n chi2[i] = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n\n ibest=np.argmin(chi2)\n chi2=chi2[ibest]\n coord[a]=xx[ibest]\n\n log.debug(\"loop #%d coord=%s chi2/ndata=%f (-dchi2_loop=%f -dchi2_tot=%f)\"%(loop,coord,chi2/ndata,previous_chi2-chi2,template_chi2[best_model_id]-chi2))\n diff=np.max(np.abs(coord-previous_coord))\n if diff < 0.001 :\n break\n\n # finally perform an exact best fit per axis\n for loop in range(50) :\n previous_chi2=chi2.copy()\n previous_coord=coord.copy()\n for a in range(npar) :\n if coord[a]==-1 or coord[a]==1 :\n continue # we are on edge, no gain in refitting\n xcoord=coord.copy()\n coef_minus = _compute_coef(xcoord,node_cube_coords)\n eps=0.001\n xcoord[a] += eps\n coef_plus = _compute_coef(xcoord,node_cube_coords)\n 
dcoef_dcoord = (coef_plus-coef_minus)/eps # do a numeric derivative\n #log.debug(\"dcoef_dcoord=%s\"%dcoef_dcoord)\n B = np.inner(dcoef_dcoord,HB) - np.inner(dcoef_dcoord,HA.dot(coef_minus))\n A = np.inner(dcoef_dcoord,HA.dot(dcoef_dcoord))\n if A>0 :\n dcoord=B/A\n #log.debug(\"dcoord=%f\"%dcoord)\n tmp_coord=coord.copy()\n tmp_coord[a] += dcoord\n if tmp_coord[a]<-1 or tmp_coord[a]>1 :\n #log.debug(\"do not allow extrapolations\")\n continue\n coef = _compute_coef(tmp_coord,node_cube_coords)\n tmp_chi2 = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n if tmp_chi2 < chi2 :\n log.debug(\"Improved chi2 by %f with a shift along %d of %f\"%(chi2-tmp_chi2,a,dcoord))\n coord=tmp_coord\n chi2 = tmp_chi2\n diff=np.max(np.abs(coord-previous_coord))\n if diff < 0.001 :\n break\n\n coef = _compute_coef(coord,node_cube_coords)\n chi2 = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n\n input_number_of_templates=template_flux.shape[0]\n final_coefficients=np.zeros(input_number_of_templates)\n final_coefficients[node_template_ids]=coef\n\n log.debug(\"COORD=%s\"%coord)\n log.debug(\"COEF=%s\"%coef)\n #for i in np.where(final_coefficients>0)[0] :\n # log.debug(\"TEFF[%d]=%f\"%(i,teff[i]))\n # log.debug(\"LOGG[%d]=%f\"%(i,logg[i]))\n # log.debug(\"FEH[%d]=%f\"%(i,feh[i]))\n log.debug(\"TEFF=%f\"%np.inner(final_coefficients,teff))\n log.debug(\"LOGG=%f\"%np.inner(final_coefficients,logg))\n log.debug(\"FEH=%f\"%np.inner(final_coefficients,feh))\n log.debug(\"Contributing template Ids=%s\"%np.where(final_coefficients!=0)[0])\n\n '''\n # useful debugging plot\n import matplotlib.pyplot as plt\n plt.figure()\n ok=np.where(data_ivar>0)[0]\n ii=np.argsort(data_wave[ok])\n twave=data_wave[ok][ii]\n tflux=data_flux[ok][ii]\n tivar=data_ivar[ok][ii]\n #plt.errorbar(twave,tflux,1./np.sqrt(tivar),fmt=\"o\")\n plt.plot(twave,tflux,\".\",c=\"gray\",alpha=0.2)\n dw=np.min(twave[twave>twave[0]+0.5]-twave[0])\n bins=np.linspace(twave[0],twave[-1],(twave[-1]-twave[0])/dw+1)\n sw,junk=np.histogram(twave,bins=bins,weights=tivar)\n swx,junk=np.histogram(twave,bins=bins,weights=tivar*twave)\n swy,junk=np.histogram(twave,bins=bins,weights=tivar*tflux)\n tflux=swy[sw>0]/sw[sw>0]\n twave2=swx[sw>0]/sw[sw>0]\n terr=1./np.sqrt(sw[sw>0])\n plt.errorbar(twave2,tflux,terr,fmt=\"o\",alpha=0.5)\n model = np.zeros(data_flux.shape)\n for c,t in zip(coef,node_template_flux) :\n model += c*t\n plt.plot(twave,model[ok][ii],\"-\",c=\"r\")\n plt.show()\n '''\n\n\n return final_coefficients,chi2\n\n\ndef match_templates(wave, flux, ivar, resolution_data, stdwave, stdflux, teff, logg, feh, ncpu=1, z_max=0.005, z_res=0.00002, template_error=0):\n \"\"\"For each input spectrum, identify which standard star template is the closest\n match, factoring out broadband throughput/calibration differences.\n\n Args:\n wave : A dictionary of 1D array of vacuum wavelengths [Angstroms]. 
Example below.\n flux : A dictionary of 1D observed flux for the star\n ivar : A dictionary 1D inverse variance of flux\n resolution_data: resolution corresponding to the star's fiber\n stdwave : 1D standard star template wavelengths [Angstroms]\n stdflux : 2D[nstd, nwave] template flux\n teff : 1D[nstd] effective model temperature\n logg : 1D[nstd] model surface gravity\n feh : 1D[nstd] model metallicity\n ncpu : number of cpu for multiprocessing\n\n Returns:\n coef : numpy.array of linear coefficient of standard stars\n redshift : redshift of standard star\n chipdf : reduced chi2\n\n Notes:\n - wave and stdwave can be on different grids that don't\n necessarily overlap\n - wave does not have to be uniform or monotonic. Multiple cameras\n can be supported by concatenating their wave and flux arrays\n \"\"\"\n # I am treating the input arguments from three frame files as dictionary. For example\n # wave{\"r\":rwave,\"b\":bwave,\"z\":zwave}\n # Each data(3 channels) is compared to every model.\n # flux should be already flat fielded and sky subtracted.\n\n\n\n cameras = list(flux.keys())\n log = get_logger()\n log.debug(time.asctime())\n\n # fit continuum and save it\n continuum={}\n for cam in wave.keys() :\n tmp=applySmoothingFilter(flux[cam]) # this is fast\n continuum[cam] = tmp\n\n # mask out wavelength that could bias the fit\n\n log.debug(\"mask potential cosmics (3 sigma positive fluctuations)\")\n for cam in wave.keys() :\n ok=np.where((ivar[cam]>0))[0]\n if ok.size>0 :\n ivar[cam][ok] *= (flux[cam][ok]<(continuum[cam][ok]+3/np.sqrt(ivar[cam][ok])))\n\n\n log.debug(\"mask sky lines\")\n # in vacuum\n # mask blue lines that can affect fit of Balmer series\n # line at 5577 has a stellar line close to it !\n # line at 7853. has a stellar line close to it !\n # mask everything above 8270A because it can bias the star redshift\n # all of this is based on analysis of a few exposures of BOSS data\n # in vacuum\n skylines=np.array([4047.5,4359.3,5462.3,5578.9,5891.3,5897.3,6301.8,6365.4,7823.3,7855.2])\n\n hw=6. 
# A\n for cam in wave.keys() :\n for line in skylines :\n ivar[cam][(wave[cam]>=(line-hw))&(wave[cam]<=(line+hw))]=0.\n ivar[cam][wave[cam]>8270]=0.\n\n # mask telluric lines\n srch_filename = \"data/arc_lines/telluric_lines.txt\"\n if not resource_exists('desispec', srch_filename):\n log.error(\"Cannot find telluric mask file {:s}\".format(srch_filename))\n raise Exception(\"Cannot find telluric mask file {:s}\".format(srch_filename))\n telluric_mask_filename = resource_filename('desispec', srch_filename)\n telluric_features = np.loadtxt(telluric_mask_filename)\n log.debug(\"Masking telluric features from file %s\"%telluric_mask_filename)\n for cam in wave.keys() :\n for feature in telluric_features :\n ivar[cam][(wave[cam]>=feature[0])&(wave[cam]<=feature[1])]=0.\n\n\n\n # add error propto to flux to account for model error\n if template_error>0 :\n for cam in wave.keys() :\n ok=np.where(ivar[cam]>0)[0]\n if ok.size>0 :\n ivar[cam][ok] = 1./ ( 1./ivar[cam][ok] + (template_error*continuum[cam][ok] )**2 )\n\n # normalize data and store them in single array\n data_wave=np.array([])\n data_flux=np.array([])\n data_continuum=np.array([])\n data_ivar=np.array([])\n data_index=np.array([])\n sorted_keys = list(wave.keys())\n sorted_keys.sort() # force sorting the keys to agree with models (found unpredictable ordering in tests)\n for index,cam in enumerate(sorted_keys) :\n data_index=np.append(data_index,np.ones(wave[cam].size)*index)\n data_wave=np.append(data_wave,wave[cam])\n data_flux=np.append(data_flux,flux[cam]/(continuum[cam]+(continuum[cam]==0)))\n data_continuum=np.append(data_continuum,continuum[cam])\n data_ivar=np.append(data_ivar,ivar[cam]*continuum[cam]**2)\n data_index=data_index.astype(int)\n\n ndata = np.sum(data_ivar>0)\n\n\n # start looking at models\n\n # find canonical f-type model: Teff=6000, logg=4, Fe/H=-1.5\n canonical_model=np.argmin((teff-6000.0)**2+(logg-4.0)**2+(feh+1.5)**2)\n\n # fit redshift on canonical model\n # we use the original data to do this\n # because we resample both the data and model on a logarithmic grid in the routine\n\n if True : # mask Ca H&K lines. Present in ISM, can bias the stellar redshift fit\n log.debug(\"Mask ISM lines for redshift\")\n ismlines=np.array([3934.77,3969.59])\n hw=6. 
# A\n for cam in wave.keys() :\n for line in ismlines :\n ivar[cam][(wave[cam]>=(line-hw))&(wave[cam]<=(line+hw))]=0.\n\n z = redshift_fit(wave, flux, ivar, resolution_data, stdwave, stdflux[canonical_model], z_max, z_res)\n\n # now we go back to the model spectra , redshift them, resample, apply resolution, normalize and chi2 match\n\n ntemplates=stdflux.shape[0]\n\n # here we take into account the redshift once and for all\n shifted_stdwave=stdwave*(1+z)\n\n func_args = []\n # need to parallelize the model resampling\n for template_id in range(ntemplates) :\n arguments={\"data_wave_per_camera\":wave,\n \"resolution_data_per_camera\":resolution_data,\n \"template_wave\":shifted_stdwave,\n \"template_flux\":stdflux[template_id],\n \"template_id\":template_id}\n func_args.append( arguments )\n\n\n if ncpu > 1:\n log.debug(\"creating multiprocessing pool with %d cpus\"%ncpu); sys.stdout.flush()\n pool = multiprocessing.Pool(ncpu)\n log.debug(\"Running pool.map() for {} items\".format(len(func_args))); sys.stdout.flush()\n results = pool.map(_func, func_args)\n log.debug(\"Finished pool.map()\"); sys.stdout.flush()\n pool.close()\n pool.join()\n log.debug(\"Finished pool.join()\"); sys.stdout.flush()\n else:\n log.debug(\"Not using multiprocessing for {} cpus\".format(ncpu))\n\n results = [_func(x) for x in func_args]\n log.debug(\"Finished serial loop\")\n\n # collect results\n # in case the exit of the multiprocessing pool is not ordered as the input\n # we returned the template_id\n template_flux=np.zeros((ntemplates,data_flux.size))\n template_norm=np.zeros((ntemplates,data_flux.size))\n for result in results :\n template_id = result[0]\n template_tmp_wave = result[1]\n template_tmp_flux = result[2]\n template_tmp_norm = result[3]\n mdiff=np.max(np.abs(data_wave-template_tmp_wave)) # just a safety check\n if mdiff>1.e-5 :\n log.error(\"error indexing of wave and flux somewhere above, checking if it's just an ordering issue, max diff=%f\"%mdiff)\n raise ValueError(\"wavelength array difference cannot be fixed with reordering, ordered max diff=%f\"%mdiff)\n template_flux[template_id] = template_tmp_flux\n template_norm[template_id] = template_tmp_norm\n\n # compute model chi2\n template_chi2=np.zeros(ntemplates)\n for template_id in range(ntemplates) :\n template_chi2[template_id] = np.sum(data_ivar*(data_flux-template_flux[template_id])**2)\n\n best_model_id=np.argmin(template_chi2)\n best_chi2=template_chi2[best_model_id]\n log.debug(\"selected best model {} chi2/ndf {}\".format(best_model_id, best_chi2/ndata))\n\n # interpolate around best model using parameter grid\n coef,chi2 = interpolate_on_parameter_grid(data_wave, data_flux, data_ivar, template_flux, teff, logg, feh, template_chi2)\n log.debug(\"after interpolation chi2/ndf {}\".format(chi2/ndata))\n\n log.debug(\"use best fit to derive calibration and apply it to the templates before refitting the star ...\")\n # the division by the median filtered spectrum leaves some imprint of the input transmission\n # so we will apply calibration to the model and redo the whole fit\n # to make sure this is not driving the stellar model selection.\n\n\n log.debug(\"remultiply template by their norme\")\n template_flux *= template_norm\n\n log.debug(\"compute best fit model\")\n model=np.zeros(data_wave.size)\n for c,t in zip(coef,template_flux) :\n if c>0 : model += c*t\n\n\n func_args=[]\n for index in np.unique(data_index) :\n log.debug(\"compute calib for cam index %d\"%index)\n ii=np.where(data_index==index)[0]\n calib = 
(data_flux[ii]*data_continuum[ii])/(model[ii]+(model[ii]==0))\n scalib = applySmoothingFilter(calib,width=400)\n\n min_scalib=0.\n bad=scalib<=min_scalib\n if np.sum(bad)>0 :\n scalib[bad]=min_scalib\n\n log.debug(\"multiply templates by calib for cam index %d\"%index)\n template_flux[:,ii] *= scalib\n\n # apply this to all the templates and recompute median filter\n for t in range(template_flux.shape[0]) :\n arguments={\"template_id\":t,\"camera_index\":index,\"template_flux\":template_flux[t][ii]}\n func_args.append(arguments)\n\n if ncpu > 1:\n log.debug(\"divide templates by median filters using multiprocessing.Pool of ncpu=%d\"%ncpu)\n pool = multiprocessing.Pool(ncpu)\n results = pool.map(_func2, func_args)\n log.debug(\"finished pool.map()\"); sys.stdout.flush()\n pool.close()\n pool.join()\n log.debug(\"finished pool.join()\"); sys.stdout.flush()\n else :\n log.debug(\"divide templates serially\")\n results = [_func2(x) for x in func_args]\n log.debug(\"Finished serial loop\")\n\n # collect results\n for result in results :\n template_id = result[0]\n index = result[1]\n template_flux[template_id][data_index==index] /= (result[2] + (result[2]==0))\n\n log.debug(\"refit the model ...\")\n template_chi2=np.zeros(ntemplates)\n for template_id in range(ntemplates) :\n template_chi2[template_id] = np.sum(data_ivar*(data_flux-template_flux[template_id])**2)\n\n best_model_id=np.argmin(template_chi2)\n best_chi2=template_chi2[best_model_id]\n\n log.debug(\"selected best model {} chi2/ndf {}\".format(best_model_id, best_chi2/ndata))\n\n # interpolate around best model using parameter grid\n coef,chi2 = interpolate_on_parameter_grid(data_wave, data_flux, data_ivar, template_flux, teff, logg, feh, template_chi2)\n log.debug(\"after interpolation chi2/ndf {}\".format(chi2/ndata))\n\n\n return coef,z,chi2/ndata\n\n\ndef normalize_templates(stdwave, stdflux, mag, band, photsys):\n \"\"\"Returns spectra normalized to input magnitudes.\n\n Args:\n stdwave : 1D array of standard star wavelengths [Angstroms]\n stdflux : 1D observed flux\n mag : float desired magnitude\n band : G,R,Z,W1 or W2\n photsys : N or S (for Legacy Survey North or South)\n\n Returns:\n stdwave : same as input\n normflux : normalized flux array\n\n Only SDSS_r band is assumed to be used for normalization for now.\n \"\"\"\n log = get_logger()\n fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom\n filter_response=load_legacy_survey_filter(band,photsys)\n apMag=filter_response.get_ab_magnitude(stdflux*fluxunits,stdwave)\n scalefac=10**((apMag-mag)/2.5)\n log.debug('scaling mag {:.3f} to {:.3f} using scalefac {:.3f}'.format(apMag,mag, scalefac))\n normflux=stdflux*scalefac\n\n return normflux\n\ndef compute_flux_calibration(frame, input_model_wave,input_model_flux,input_model_fibers, nsig_clipping=10.,deg=2,debug=False,highest_throughput_nstars=0) :\n\n \"\"\"Compute average frame throughput based on data frame.(wave,flux,ivar,resolution_data)\n and spectro-photometrically calibrated stellar models (model_wave,model_flux).\n Wave and model_wave are not necessarily on the same grid\n\n Args:\n frame : Frame object with attributes wave, flux, ivar, resolution_data\n input_model_wave : 1D[nwave] array of model wavelengths\n input_model_flux : 2D[nstd, nwave] array of model fluxes\n input_model_fibers : 1D[nstd] array of model fibers\n nsig_clipping : (optional) sigma clipping level\n\n Returns:\n desispec.FluxCalib object\n calibration: mean calibration (without resolution)\n\n Notes:\n - we first resample the 
model on the input flux wave grid\n - then convolve it to the data resolution (the input wave grid is supposed finer than the spectral resolution)\n - then iteratively\n - fit the mean throughput (deconvolved, this is needed because of sharp atmospheric absorption lines)\n - compute broad band correction to fibers (to correct for small mis-alignement for instance)\n - perform outlier rejection\n\n There is one subtelty with the relation between calibration and resolution.\n - The input frame flux is on average flux^frame_fiber = R_fiber*C*flux^true where C is the true calibration (or throughput)\n which is a function of wavelength. This is the system we solve.\n - But we want to return a calibration vector per fiber C_fiber defined by flux^cframe_fiber = flux^frame_fiber/C_fiber,\n such that flux^cframe can be compared with a convolved model of the truth, flux^cframe_fiber = R_fiber*flux^true,\n i.e. (R_fiber*C*flux^true)/C_fiber = R_fiber*true_flux, giving C_fiber = (R_fiber*C*flux^true)/(R_fiber*flux^true)\n - There is no solution for this for all possible input specta. The solution for a flat spectrum is returned,\n which is very close to C_fiber = R_fiber*C (but not exactly).\n\n \"\"\"\n\n log=get_logger()\n log.info(\"starting\")\n\n # add margin to frame\n def add_margin_2d_dim1(iarray,margin) :\n shape=(iarray.shape[0],iarray.shape[1]+2*margin)\n oarray=np.zeros(shape,dtype=iarray.dtype)\n oarray[:,:margin]=iarray[:,0][:,None]\n oarray[:,margin:-margin]=iarray\n oarray[:,-margin:]=iarray[:,-1][:,None]\n return oarray\n def add_margin_3d_dim2(iarray,margin) :\n shape=(iarray.shape[0],iarray.shape[1],iarray.shape[2]+2*margin)\n oarray=np.zeros(shape,dtype=iarray.dtype)\n oarray[:,:,:margin]=iarray[:,:,0][:,:,None]\n oarray[:,:,margin:-margin]=iarray\n oarray[:,:,-margin:]=iarray[:,:,-1][:,:,None]\n return oarray\n\n margin = 3\n log.info(\"adding margin of {} pixels on each side\".format(margin))\n nwave=frame.wave.size\n dw=frame.wave[1]-frame.wave[0]\n wave_with_margin=np.zeros(nwave+2*margin)\n wave_with_margin[margin:nwave+margin]=frame.wave\n wave_with_margin[0:margin]=frame.wave[0]+dw*np.arange(-margin,0)\n wave_with_margin[nwave+margin:]=frame.wave[-1]+dw*np.arange(1,margin+1)\n tframe = copy.deepcopy(frame)\n tframe.wave = wave_with_margin\n tframe.nwave = tframe.wave.size\n tframe.flux = add_margin_2d_dim1(frame.flux,margin)\n tframe.ivar = add_margin_2d_dim1(frame.ivar,margin)\n tframe.mask = add_margin_2d_dim1(frame.mask,margin)\n tframe.resolution_data = add_margin_3d_dim2(frame.resolution_data,margin)\n tframe.R = np.array( [Resolution(r) for r in tframe.resolution_data] )\n\n #- Pull out just the standard stars for convenience, but keep the\n #- full frame of spectra around because we will later need to convolved\n #- the calibration vector for each fiber individually\n stdfibers = np.where(isStdStar(tframe.fibermap))[0]\n assert len(stdfibers) > 0\n\n if not np.all(np.in1d(stdfibers, input_model_fibers)):\n bad = set(input_model_fibers) - set(stdfibers)\n if len(bad) > 0:\n log.error('Discarding input_model_fibers that are not standards: {}'.format(bad))\n stdfibers = np.intersect1d(stdfibers, input_model_fibers)\n\n # also other way around\n stdfibers = np.intersect1d(input_model_fibers, stdfibers)\n log.info(\"Std stars fibers: {}\".format(stdfibers))\n\n stdstars = tframe[stdfibers]\n\n nwave=stdstars.nwave\n nstds=stdstars.flux.shape[0]\n\n dwave=(stdstars.wave-np.mean(stdstars.wave))/(stdstars.wave[-1]-stdstars.wave[0]) # normalized wave for polynomial fit\n\n # 
resample model to data grid and convolve by resolution\n model_flux=np.zeros((nstds, nwave))\n convolved_model_flux=np.zeros((nstds, nwave))\n\n for star in range(nstds) :\n model_flux_index = np.where(input_model_fibers == stdfibers[star])[0][0]\n model_flux[star]=resample_flux(stdstars.wave,input_model_wave,input_model_flux[model_flux_index])\n convolved_model_flux[star]=stdstars.R[star].dot(model_flux[star])\n\n input_model_flux = None # I shall not use any more the input_model_flux here\n\n # iterative fitting and clipping to get precise mean spectrum\n current_ivar=stdstars.ivar*(stdstars.mask==0)\n\n #- Start with a first pass median rejection\n calib = (convolved_model_flux!=0)*(stdstars.flux/(convolved_model_flux + (convolved_model_flux==0)))\n median_calib = np.median(calib, axis=0)\n\n # First fit of smooth correction per fiber, and 10% model error to variance, and perform first outlier rejection\n smooth_fiber_correction=np.ones((stdstars.flux.shape))\n chi2=np.zeros((stdstars.flux.shape))\n\n badfiber=np.zeros(nstds,dtype=int)\n\n for star in range(nstds) :\n if badfiber[star] : continue\n if np.sum(current_ivar[star]) == 0 :\n log.warning(\"null inverse variance for star {}\".format(star))\n badfiber[star] = 1\n continue\n\n M = median_calib*stdstars.R[star].dot(model_flux[star])\n\n try:\n ii = np.where(M>0.1*np.mean(M))[0]\n if ii.size == 0 :\n current_ivar[star]=0.\n badfiber[star] = 1\n continue\n pol=np.poly1d(np.polyfit(dwave[ii],stdstars.flux[star,ii]/M[ii],deg=deg,w=current_ivar[star,ii]*M[ii]**2))\n smooth_fiber_correction[star]=pol(dwave)\n except ValueError :\n log.warning(\"polynomial fit for star %d failed\"%star)\n current_ivar[star]=0.\n badfiber[star] = 1\n continue\n except numpy.linalg.LinAlgError :\n log.warning(\"polynomial fit for star %d failed\"%star)\n current_ivar[star]=0.\n badfiber[star] = 1\n continue\n\n # add few percent multiplicative error to ivar for sigma clipping\n chi2[star]=(current_ivar[star]>0)*(stdstars.flux[star]-smooth_fiber_correction[star]*M)**2/(1./(current_ivar[star] + (current_ivar[star]==0))+(0.1*stdstars.flux[star])**2)\n # checking indexing using mags\n #from desispec.io.filters import load_legacy_survey_filter\n #from astropy import units\n #filter_response=load_legacy_survey_filter(\"R\",\"N\")\n #fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom\n #dummy_wave = np.linspace(3000,12000,12000-3000)\n #dummy_flux = np.interp(dummy_wave,stdstars.wave,M,left=0,right=0)\n #mag = filter_response.get_ab_magnitude(dummy_flux*fluxunits,dummy_wave)\n #fmapmag = -2.5*np.log10(stdstars.fibermap[\"FLUX_R\"][star])+22.5\n #print(\"star index={} flux ratio={}\".format(star,10**(0.4*(mag-fmapmag))))\n\n bad=(chi2>nsig_clipping**2)\n current_ivar[bad] = 0\n\n sqrtw=np.sqrt(current_ivar)\n sqrtwflux=np.sqrt(current_ivar)*stdstars.flux\n\n # diagonal sparse matrices\n D1=scipy.sparse.lil_matrix((nwave,nwave))\n D2=scipy.sparse.lil_matrix((nwave,nwave))\n\n\n nout_tot=0\n previous_mean=0.\n for iteration in range(20) :\n\n # fit mean calibration\n A=scipy.sparse.lil_matrix((nwave,nwave)).tocsr()\n B=np.zeros((nwave))\n\n # loop on star to handle resolution\n for star in range(nstds) :\n if star%10==0 :\n log.info(\"iter %d star %d\"%(iteration,star))\n\n if badfiber[star]: continue\n\n R = stdstars.R[star]\n\n # diagonal sparse matrix with content = sqrt(ivar)*flat\n D1.setdiag(sqrtw[star]*smooth_fiber_correction[star])\n D2.setdiag(model_flux[star])\n sqrtwmodelR = D1.dot(R.dot(D2)) # chi2 = sum (sqrtw*data_flux 
-diag(sqrtw)*smooth_fiber_correction*R*diag(model_flux)*calib )\n\n A = A+(sqrtwmodelR.T*sqrtwmodelR).tocsr()\n B += sqrtwmodelR.T*sqrtwflux[star]\n\n if np.sum(current_ivar>0)==0 :\n log.error(\"null ivar, cannot calibrate this frame\")\n raise ValueError(\"null ivar, cannot calibrate this frame\")\n\n #- Add a weak prior that calibration = median_calib\n #- to keep A well conditioned\n minivar = np.min(current_ivar[current_ivar>0])\n log.debug('min(ivar[ivar>0]) = {}'.format(minivar))\n epsilon = minivar/10000\n A = epsilon*np.eye(nwave) + A #- converts sparse A -> dense A\n B += median_calib*epsilon\n\n log.info(\"iter %d solving\"%iteration)\n ### log.debug('cond(A) {:g}'.format(np.linalg.cond(A)))\n #calibration=cholesky_solve(A, B)\n w = np.diagonal(A)>0\n A_pos_def = A[w,:]\n A_pos_def = A_pos_def[:,w]\n calibration = B*0\n try:\n calibration[w]=cholesky_solve(A_pos_def, B[w])\n except np.linalg.linalg.LinAlgError :\n log.info('cholesky fails in iteration {}, trying svd'.format(iteration))\n calibration[w] = np.linalg.lstsq(A_pos_def,B[w])[0]\n\n wmask = (np.diagonal(A)<=0)\n if np.sum(wmask)>0 :\n wmask = wmask.astype(float)\n wmask = R.dot(R.dot(wmask))\n bad = np.where(wmask!=0)[0]\n log.info(\"nbad={}\".format(bad.size))\n good = np.where(wmask==0)[0]\n calibration[bad] = np.interp(bad,good,calibration[good],left=0,right=0)\n\n log.info(\"iter %d fit smooth correction per fiber\"%iteration)\n # fit smooth fiberflat and compute chi2\n for star in range(nstds) :\n if star%10==0 :\n log.info(\"iter %d fiber %d(smooth)\"%(iteration,star))\n\n if badfiber[star]: continue\n\n M = stdstars.R[star].dot(calibration*model_flux[star])\n\n try:\n ii = np.where(M>0.1*np.mean(M))[0]\n if ii.size == 0 :\n current_ivar[star]=0.\n badfiber[star] = 1\n continue\n pol=np.poly1d(np.polyfit(dwave[ii],stdstars.flux[star,ii]/M[ii],deg=deg,w=current_ivar[star,ii]*M[ii]**2))\n smooth_fiber_correction[star]=pol(dwave)\n except ValueError as e :\n log.warning(\"polynomial fit for star %d failed\"%star)\n current_ivar[star]=0.\n badfiber[star] = 1.\n continue\n except numpy.linalg.LinAlgError as e :\n log.warning(\"polynomial fit for star %d failed\"%star)\n current_ivar[star]=0.\n badfiber[star] = 1.\n continue\n chi2[star]=current_ivar[star]*(stdstars.flux[star]-smooth_fiber_correction[star]*M)**2\n\n log.info(\"iter {0:d} rejecting\".format(iteration))\n\n nout_iter=0\n if iteration<1 :\n # only remove worst outlier per wave\n # apply rejection iteratively, only one entry per wave among stars\n # find waves with outlier (fastest way)\n nout_per_wave=np.sum(chi2>nsig_clipping**2,axis=0)\n selection=np.where(nout_per_wave>0)[0]\n for i in selection :\n worst_entry=np.argmax(chi2[:,i])\n current_ivar[worst_entry,i]=0\n sqrtw[worst_entry,i]=0\n #sqrtwmodel[worst_entry,i]=0\n sqrtwflux[worst_entry,i]=0\n nout_iter += 1\n\n else :\n # remove all of them at once\n bad=(chi2>nsig_clipping**2)\n current_ivar *= (bad==0)\n sqrtw *= (bad==0)\n #sqrtwmodel *= (bad==0)\n sqrtwflux *= (bad==0)\n nout_iter += np.sum(bad)\n\n nout_tot += nout_iter\n\n sum_chi2=float(np.sum(chi2))\n ndf=int(np.sum(chi2>0)-nwave-nstds*2)\n chi2pdf=0.\n if ndf>0 :\n chi2pdf=sum_chi2/ndf\n\n # normalize to preserve the average throughput\n # and throughput = < data/model/correction >\n # and we would like to have throughput = < data/model >\n # (we don't do that directly to reduce noise)\n # so we want to average the inverse of the smooth correction\n mean=1./np.nanmean(1./smooth_fiber_correction[badfiber==0],axis=0)\n medcorr = 
np.median(smooth_fiber_correction,axis=1)\n log.info(\"median correction = {}\".format(medcorr))\n\n if highest_throughput_nstars > 0 :\n log.info(\"keeping {} stars with highest throughput\".format(highest_throughput_nstars))\n ii=np.argsort(medcorr)[::-1][:highest_throughput_nstars]\n log.info(\"use those fibers = {}\".format(stdfibers[ii]))\n log.info(\"with median correction = {}\".format(medcorr[ii]))\n mean=1./np.nanmean(1./smooth_fiber_correction[ii][badfiber[ii]==0],axis=0)\n else :\n mmedcorr = np.median(medcorr)\n rmscorr = 1.4*np.median(np.abs(medcorr-mmedcorr))\n log.info(\"mean rms correction = {} {}\".format(mmedcorr,rmscorr))\n bad=(np.abs(medcorr-mmedcorr)>3*rmscorr)\n if np.sum(bad)>0 :\n good=(np.abs(medcorr-mmedcorr)<=3*rmscorr)\n log.info(\"use {} stars, discarding 3 sigma outlier stars with medcorr = {}\".format(np.sum(good),medcorr[bad]))\n mean=1./np.nanmean(1./smooth_fiber_correction[good][badfiber[good]==0],axis=0)\n else :\n log.info(\"use {} stars\".format(medcorr.size))\n\n smooth_fiber_correction /= mean\n\n log.info(\"iter #%d chi2=%f ndf=%d chi2pdf=%f nout=%d mean=%f\"%(iteration,sum_chi2,ndf,chi2pdf,nout_iter,np.mean(mean)))\n\n if nout_iter == 0 and np.max(np.abs(mean-previous_mean))<0.0001 :\n break\n previous_mean = mean\n\n # smooth_fiber_correction does not converge exactly to one on average, so we apply its mean to the calibration\n # (tested on sims)\n calibration /= mean\n\n log.info(\"nout tot=%d\"%nout_tot)\n\n # solve once again to get deconvolved variance\n #calibration,calibcovar=cholesky_solve_and_invert(A.todense(),B)\n calibcovar=np.linalg.inv(A)\n calibvar=np.diagonal(calibcovar)\n log.info(\"mean(var)={0:f}\".format(np.mean(calibvar)))\n\n calibvar=np.array(np.diagonal(calibcovar))\n # apply the mean (as in the iterative loop)\n calibvar *= mean**2\n calibivar=(calibvar>0)/(calibvar+(calibvar==0))\n\n # we also want to save the convolved calibration and a calibration variance\n # first compute average resolution\n mean_res_data=np.mean(tframe.resolution_data,axis=0)\n R = Resolution(mean_res_data)\n # compute convolved calib\n ccalibration = np.zeros(tframe.flux.shape)\n for i in range(tframe.nspec):\n norme = tframe.R[i].dot(np.ones(calibration.shape))\n ok=np.where(norme>0)[0]\n if ok.size :\n ccalibration[i][ok]=tframe.R[i].dot(calibration)[ok]/norme[ok]\n\n # Use diagonal of mean calibration covariance for output.\n ccalibcovar=R.dot(calibcovar).dot(R.T.todense())\n ccalibvar=np.array(np.diagonal(ccalibcovar))\n\n # apply the mean (as in the iterative loop)\n ccalibvar *= mean**2\n ccalibivar=(ccalibvar>0)/(ccalibvar+(ccalibvar==0))\n\n # at least a few stars at each wavelength\n min_number_of_stars = min(3,max(1,nstds//2))\n nstars_with_signal=np.sum(current_ivar>0,axis=0)\n bad = (nstars_with_signal<min_number_of_stars)\n nallbad = np.sum(nstars_with_signal==0)\n # increase by 1 pixel\n bad[1:-1] |= bad[2:]\n bad[1:-1] |= bad[:-2]\n nbad=np.sum(bad>0)\n log.info(\"Requesting at least {} star spectra at each wavelength results in masking {} add. 
flux bins ({} already masked)\".format(min_number_of_stars,nbad-nallbad,nallbad))\n\n ccalibivar[bad]=0.\n ccalibration[:,bad]=0.\n\n # convert to 2D\n # For now this is the same for all fibers; in the future it may not be\n ccalibivar = np.tile(ccalibivar, tframe.nspec).reshape(tframe.nspec, tframe.nwave)\n\n # need to do better here\n mask = tframe.mask.copy()\n\n mccalibration = R.dot(calibration)\n\n # trim back\n ccalibration=ccalibration[:,margin:-margin]\n ccalibivar=ccalibivar[:,margin:-margin]\n mask=mask[:,margin:-margin]\n mccalibration=mccalibration[margin:-margin]\n stdstars.wave=stdstars.wave[margin:-margin]\n\n # return calibration, calibivar, mask, ccalibration, ccalibivar\n return FluxCalib(stdstars.wave, ccalibration, ccalibivar, mask, mccalibration)\n\n\n\nclass FluxCalib(object):\n def __init__(self, wave, calib, ivar, mask, meancalib=None):\n \"\"\"Lightweight wrapper object for flux calibration vectors\n\n Args:\n wave : 1D[nwave] input wavelength (Angstroms)\n calib: 2D[nspec, nwave] calibration vectors for each spectrum\n ivar : 2D[nspec, nwave] inverse variance of calib\n mask : 2D[nspec, nwave] mask of calib (0=good)\n meancalib : 1D[nwave] mean convolved calibration (optional)\n\n All arguments become attributes, plus nspec,nwave = calib.shape\n\n The calib vector should be such that\n\n [1e-17 erg/s/cm^2/A] = [photons/A] / calib\n \"\"\"\n assert wave.ndim == 1\n assert calib.ndim == 2\n assert calib.shape == ivar.shape\n assert calib.shape == mask.shape\n assert np.all(ivar >= 0)\n\n self.nspec, self.nwave = calib.shape\n self.wave = wave\n self.calib = calib\n self.ivar = ivar\n self.mask = util.mask32(mask)\n self.meancalib = meancalib\n\n self.meta = dict(units='photons/(erg/s/cm^2)')\n\n def __repr__(self):\n txt = '<{:s}: nspec={:d}, nwave={:d}, units={:s}'.format(\n self.__class__.__name__, self.nspec, self.nwave, self.meta['units'])\n\n # Finish\n txt = txt + '>'\n return (txt)\n\n\ndef apply_flux_calibration(frame, fluxcalib):\n \"\"\"\n Applies flux calibration to input flux and ivar\n\n Args:\n frame: Spectra object with attributes wave, flux, ivar, resolution_data\n fluxcalib : FluxCalib object with wave, calib, ...\n\n Modifies frame.flux and frame.ivar\n \"\"\"\n log=get_logger()\n log.info(\"starting\")\n\n # check same wavelength, die if not the case\n mval=np.max(np.abs(frame.wave-fluxcalib.wave))\n #if mval > 0.00001 :\n if mval > 0.001 :\n log.error(\"not same wavelength (should raise an error instead)\")\n sys.exit(12)\n\n nwave=frame.nwave\n nfibers=frame.nspec\n\n \"\"\"\n F'=F/C\n Var(F') = Var(F)/C**2 + F**2*( d(1/C)/dC )**2*Var(C)\n = 1/(ivar(F)*C**2) + F**2*(1/C**2)**2*Var(C)\n = 1/(ivar(F)*C**2) + F**2*Var(C)/C**4\n = 1/(ivar(F)*C**2) + F**2/(ivar(C)*C**4)\n \"\"\"\n # for fiber in range(nfibers) :\n # C = fluxcalib.calib[fiber]\n # flux[fiber]=frame.flux[fiber]*(C>0)/(C+(C==0))\n # ivar[fiber]=(ivar[fiber]>0)*(civar[fiber]>0)*(C>0)/( 1./((ivar[fiber]+(ivar[fiber]==0))*(C**2+(C==0))) + flux[fiber]**2/(civar[fiber]*C**4+(civar[fiber]*(C==0))) )\n\n C = fluxcalib.calib\n frame.flux = frame.flux * (C>0) / (C+(C==0))\n frame.ivar *= (fluxcalib.ivar>0) * (C>0)\n for i in range(nfibers) :\n ok=np.where(frame.ivar[i]>0)[0]\n if ok.size>0 :\n frame.ivar[i,ok] = 1./( 1./(frame.ivar[i,ok]*C[i,ok]**2)+frame.flux[i,ok]**2/(fluxcalib.ivar[i,ok]*C[i,ok]**4) )\n\n\ndef ZP_from_calib(exptime, wave, calib):\n \"\"\" Calculate the ZP in AB magnitudes given the calibration and the wavelength arrays\n Args:\n exptime: float; exposure time in seconds\n 
wave: 1D array (A)\n calib: 1D array (converts erg/s/A to photons/s/A)\n\n Returns:\n ZP_AB: 1D array of ZP values in AB magnitudes\n\n \"\"\"\n ZP_flambda = 1e-17 / (calib/exptime) # erg/s/cm^2/A\n ZP_fnu = ZP_flambda * wave**2 / (2.9979e18) # c in A/s\n # Avoid 0 values\n ZP_AB = np.zeros_like(ZP_fnu)\n gdZ = ZP_fnu > 0.\n ZP_AB[gdZ] = -2.5 * np.log10(ZP_fnu[gdZ]) - 48.6\n # Return\n return ZP_AB\n\n\ndef qa_fluxcalib(param, frame, fluxcalib):\n \"\"\"\n Args:\n param: dict of QA parameters\n frame: Frame\n fluxcalib: FluxCalib\n\n Returns:\n qadict: dict of QA outputs\n Need to record simple Python objects for yaml (str, float, int)\n\n \"\"\"\n log = get_logger()\n qadict = {}\n\n # Unpack model\n exptime = frame.meta['EXPTIME']\n\n # Standard stars\n stdfibers = np.where(isStdStar(frame.fibermap))[0]\n stdstars = frame[stdfibers]\n nstds = len(stdfibers)\n\n # Calculate ZP for mean spectrum\n #medcalib = np.median(fluxcalib.calib,axis=0)\n medcalib = np.median(fluxcalib.calib[stdfibers],axis=0)\n ZP_AB = ZP_from_calib(exptime, fluxcalib.wave, medcalib) # erg/s/cm^2/A\n\n # ZP at fiducial wavelength (AB mag for 1 photon/s/A)\n iZP = np.argmin(np.abs(fluxcalib.wave-param['ZP_WAVE']))\n qadict['ZP'] = float(np.median(ZP_AB[iZP-10:iZP+10]))\n\n # Unpack star data\n #sqrtwmodel, sqrtwflux, current_ivar, chi2 = indiv_stars\n\n # RMS\n qadict['NSTARS_FIBER'] = int(nstds)\n ZP_fiducial = np.zeros(nstds)\n\n for ii in range(nstds):\n # Good pixels\n gdp = stdstars.ivar[ii, :] > 0.\n if not np.any(gdp):\n continue\n icalib = fluxcalib.calib[stdfibers[ii]][gdp]\n i_wave = fluxcalib.wave[gdp]\n # ZP\n ZP_stars = ZP_from_calib(exptime, i_wave, icalib)\n iZP = np.argmin(np.abs(i_wave-param['ZP_WAVE']))\n ZP_fiducial[ii] = float(np.median(ZP_stars[iZP-10:iZP+10]))\n #import pdb; pdb.set_trace()\n qadict['RMS_ZP'] = float(np.std(ZP_fiducial))\n\n # MAX ZP Offset\n #stdfibers = np.where(frame.fibermap['OBJTYPE'] == 'STD')[0]\n ZPoffset = ZP_fiducial-qadict['ZP']\n imax = np.argmax(np.abs(ZPoffset))\n qadict['MAX_ZP_OFF'] = [float(ZPoffset[imax]),\n int(stdfibers[np.argmax(ZPoffset)])]\n if qadict['MAX_ZP_OFF'][0] > param['MAX_ZP_OFF']:\n log.warning(\"Bad standard star ZP {:g}, in fiber {:d}\".format(\n qadict['MAX_ZP_OFF'][0], qadict['MAX_ZP_OFF'][1]))\n # Return\n return qadict\n" ]
[ [ "numpy.sum", "numpy.sqrt", "numpy.abs", "numpy.unique", "numpy.min", "numpy.inner", "numpy.median", "numpy.gradient", "numpy.max", "numpy.log10", "numpy.mean", "numpy.interp", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.where" ], [ "numpy.polyfit", "numpy.sqrt", "numpy.linspace", "numpy.in1d", "numpy.all", "numpy.max", "numpy.zeros_like", "numpy.argmin", "numpy.mean", "numpy.nanmean", "numpy.any", "numpy.where", "numpy.unique", "numpy.arange", "numpy.eye", "numpy.intersect1d", "numpy.std", "numpy.argmax", "numpy.interp", "numpy.count_nonzero", "numpy.zeros", "numpy.min", "numpy.linalg.inv", "numpy.median", "numpy.linalg.lstsq", "numpy.append", "numpy.log10", "numpy.argsort", "scipy.ndimage.filters.median_filter", "numpy.array", "numpy.diagonal", "numpy.sum", "numpy.abs", "numpy.inner", "numpy.tile", "numpy.ones", "numpy.loadtxt", "scipy.sparse.lil_matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.10", "1.3", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] } ]
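Note on the desispec flux-calibration record above: its interpolate_on_parameter_grid routine avoids re-summing over wavelength for every trial coefficient vector by expanding chi2 = sum(data_ivar*(data_flux - sum_i coef_i*template_i)**2) into chi2_0 - 2*inner(coef, HB) + inner(coef, HA.dot(coef)), where HB and HA are precomputed weighted scalar products. The following is a minimal toy check of that identity using random arrays, not DESI spectra.

import numpy as np

rng = np.random.default_rng(0)
nwave, ntemplates = 1000, 4
data_flux = rng.normal(1.0, 0.1, nwave)
data_ivar = rng.uniform(0.5, 2.0, nwave)
templates = rng.normal(1.0, 0.1, (ntemplates, nwave))

# Precompute the scalar products used by interpolate_on_parameter_grid
chi2_0 = np.sum(data_ivar * data_flux**2)
HB = np.array([np.sum(data_ivar * data_flux * t) for t in templates])
HA = np.array([[np.sum(data_ivar * t1 * t2) for t2 in templates] for t1 in templates])

coef = np.array([0.1, 0.2, 0.3, 0.4])   # one trial linear combination
model = coef @ templates                 # sum_i coef_i * template_i

direct = np.sum(data_ivar * (data_flux - model)**2)   # full spectral sum
fast = chi2_0 - 2.0 * np.inner(coef, HB) + np.inner(coef, HA @ coef)
assert np.isclose(direct, fast)

Once HB and HA are in hand, each trial coefficient vector in the grid scan costs only an ntemplates-sized quadratic form instead of a pass over all wavelengths, which is what makes the iterative per-axis scan in the record cheap.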
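The same record's ZP_from_calib helper converts a calibration vector into an AB zero point via ZP_flambda = 1e-17/(calib/exptime), ZP_fnu = ZP_flambda*wave**2/c with c in Angstrom/s, and ZP_AB = -2.5*log10(ZP_fnu) - 48.6. A standalone sketch of that conversion follows; the wavelength and calibration values are made up for illustration and are not DESI data.

import numpy as np

def zp_ab(exptime, wave, calib):
    # Same algebra as ZP_from_calib in the record above.
    zp_flambda = 1e-17 / (calib / exptime)      # erg/s/cm^2/A
    zp_fnu = zp_flambda * wave**2 / 2.9979e18   # c in A/s
    out = np.zeros_like(zp_fnu)
    good = zp_fnu > 0                           # avoid log10 of zero
    out[good] = -2.5 * np.log10(zp_fnu[good]) - 48.6
    return out

# Purely illustrative inputs (not real calibration data)
wave = np.array([3600.0, 5500.0, 7500.0, 9800.0])   # Angstrom
calib = np.array([1.5e4, 3.0e4, 4.0e4, 2.0e4])      # arbitrary calibration vector
print(zp_ab(1000.0, wave, calib))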
Duane321/pyprobml
[ "6d0ba29f22dc7fec9dfc73788bc5520e97663bdb", "6d0ba29f22dc7fec9dfc73788bc5520e97663bdb" ]
[ "examples/kmeansHeightWeight.py", "examples/lossFunctionFig.py" ]
[ "#!/usr/bin/env python\n\nimport matplotlib.pyplot as pl\nimport numpy as np\nfrom utils import util\nfrom sklearn.cluster import KMeans\nfrom utils.util import save_fig\n\ndata = util.load_mat('heightWeight/heightWeight')\ndata = data['heightWeightData']\nmarkers = 'Dox'\ncolors = 'rgb'\n\nfor i in range(3):\n KM_model = KMeans(init='k-means++', n_clusters=i+1)\n labels = KM_model.fit_predict(data[:, [1, 2]])\n labels_unique = np.unique(labels)\n fig = pl.figure(i)\n for j in range(len(labels_unique)):\n data_chosen = data[labels == labels_unique[j]]\n pl.scatter(data_chosen[:, 1], data_chosen[:, 2],\n marker=markers[j],\n color=colors[j])\n pl.title('k = %s' % (i+1))\n save_fig('kmeansHeightWeight_%s.png' % (i+1))\npl.show()\n", "#!/usr/bin/env python3\n\n# Plots loss functions of form |x|**q\n\nimport matplotlib.pyplot as pl\nimport numpy as np\nfrom scipy.stats import t, laplace, norm\nfrom utils.util import save_fig\n\nx = np.linspace(-4, 4, 100)\npl.title('|x|^0.2')\npl.plot(x, np.absolute(x)**.2)\nsave_fig('lossFunctionFig_01.png')\n\npl.figure()\npl.title('|x|')\npl.plot(x, np.absolute(x))\nsave_fig('lossFunctionFig_02.png')\n\npl.figure()\npl.title('|x|^2')\npl.plot(x, np.absolute(x)**2)\nsave_fig('lossFunctionFig_03.png')\npl.show()\n" ]
[ [ "matplotlib.pyplot.title", "sklearn.cluster.KMeans", "numpy.unique", "matplotlib.pyplot.scatter", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.absolute", "matplotlib.pyplot.title", "numpy.linspace", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mhnnunes/nas_gnn
[ "91092acfee9fdbbef3e22252040b80aa96143311", "91092acfee9fdbbef3e22252040b80aa96143311" ]
[ "graphnas_variants/micro_graphnas/micro_search_space.py", "graphnas/evolution_trainer.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch.nn import Module\nfrom torch_geometric.nn.conv import *\n\ngnn_list = [\n \"gat_8\", # GAT with 8 heads\n \"gat_6\", # GAT with 6 heads\n \"gat_4\", # GAT with 4 heads\n \"gat_2\", # GAT with 2 heads\n \"gat_1\", # GAT with 1 heads\n \"gcn\", # GCN\n \"cheb\", # chebnet\n \"sage\", # sage\n \"arma\",\n \"sg\", # simplifying gcn\n \"linear\", # skip connection\n \"zero\", # skip connection\n]\nact_list = [\n # \"sigmoid\", \"tanh\", \"relu\", \"linear\",\n # \"softplus\", \"leaky_relu\", \"relu6\", \"elu\"\n \"sigmoid\", \"tanh\", \"relu\", \"linear\", \"elu\"\n]\n\n\ndef act_map(act):\n if act == \"linear\":\n return lambda x: x\n elif act == \"elu\":\n return F.elu\n elif act == \"sigmoid\":\n return torch.sigmoid\n elif act == \"tanh\":\n return torch.tanh\n elif act == \"relu\":\n return torch.nn.functional.relu\n elif act == \"relu6\":\n return torch.nn.functional.relu6\n elif act == \"softplus\":\n return torch.nn.functional.softplus\n elif act == \"leaky_relu\":\n return torch.nn.functional.leaky_relu\n else:\n raise Exception(\"wrong activate function\")\n\n\ndef gnn_map(gnn_name, in_dim, out_dim, concat=False, bias=True) -> Module:\n '''\n :param gnn_name:\n :param in_dim:\n :param out_dim:\n :param concat: for gat, concat multi-head output or not\n :return: GNN model\n '''\n if gnn_name == \"gat_8\":\n return GATConv(in_dim, out_dim, 8, concat=concat, bias=bias)\n elif gnn_name == \"gat_6\":\n return GATConv(in_dim, out_dim, 6, concat=concat, bias=bias)\n elif gnn_name == \"gat_4\":\n return GATConv(in_dim, out_dim, 4, concat=concat, bias=bias)\n elif gnn_name == \"gat_2\":\n return GATConv(in_dim, out_dim, 2, concat=concat, bias=bias)\n elif gnn_name in [\"gat_1\", \"gat\"]:\n return GATConv(in_dim, out_dim, 1, concat=concat, bias=bias)\n elif gnn_name == \"gcn\":\n return GCNConv(in_dim, out_dim)\n elif gnn_name == \"cheb\":\n return ChebConv(in_dim, out_dim, K=2, bias=bias)\n elif gnn_name == \"sage\":\n return SAGEConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"gated\":\n return GatedGraphConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"arma\":\n return ARMAConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"sg\":\n return SGConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"linear\":\n return LinearConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"zero\":\n return ZeroConv(in_dim, out_dim, bias=bias)\n\n\nclass LinearConv(Module):\n def __init__(self,\n in_channels,\n out_channels,\n bias=True):\n super(LinearConv, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.linear = torch.nn.Linear(in_channels, out_channels, bias)\n\n def forward(self, x, edge_index, edge_weight=None):\n return self.linear(x)\n\n def __repr__(self):\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n\n\nclass ZeroConv(Module):\n def __init__(self,\n in_channels,\n out_channels,\n bias=True):\n super(ZeroConv, self).__init__()\n self.out_dim = out_channels\n\n def forward(self, x, edge_index, edge_weight=None):\n return torch.zeros([x.size(0), self.out_dim]).to(x.device)\n\n def __repr__(self):\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n\n\nclass SearchSpace(object):\n def __init__(self, search_space=None):\n if search_space:\n self.search_space = search_space\n else:\n self.search_space = {}\n self.search_space[\"act\"] = act_list # activate function\n self.search_space[\"gnn\"] = 
gnn_list # gnn type\n # 0 means history, 1 means current,\n # each layer contains two input\n self.search_space[\"self_index\"] = [0, 1]\n # same as self_index,\n self.search_space[\"concat_type\"] = [\"add\",\n \"product\",\n \"concat\"]\n self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]\n self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,\n 0.5, 0.6, 0.7, 0.8, 0.9]\n self.search_space['weight_decay'] = [0, 1e-3, 1e-4,\n 1e-5, 5e-5, 5e-4]\n self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]\n pass\n\n def get_search_space(self):\n return self.search_space\n\n @staticmethod\n def generate_action_list(cell=4):\n action_list = []\n for i in range(cell):\n action_list += [\"self_index\", \"gnn\"]\n action_list += [\"act\", \"concat_type\"]\n return action_list\n\n\nclass IncrementSearchSpace(object):\n def __init__(self, search_space=None, max_cell=10):\n if search_space:\n self.search_space = search_space\n else:\n self.search_space = {}\n self.search_space[\"act\"] = act_list # activate function\n self.search_space[\"gnn\"] = gnn_list # gnn type\n for i in range(max_cell):\n self.search_space[f\"self_index_{i}\"] = list(range(2 + i))\n # 0 means history, 1 means current,\n # each layer contains two input\n self.search_space[\"concat_type\"] = [\"add\",\n \"product\",\n \"concat\"]\n # same as self_index,\n self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]\n self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,\n 0.5, 0.6, 0.7, 0.8, 0.9]\n self.search_space['weight_decay'] = [0, 1e-3, 1e-4,\n 1e-5, 5e-5, 5e-4]\n self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]\n pass\n\n def get_search_space(self):\n return self.search_space\n\n @staticmethod\n def generate_action_list(cell=4):\n action_list = []\n for i in range(cell):\n action_list += [f\"self_index_{i}\", \"gnn\"]\n action_list += [\"act\", \"concat_type\"]\n return action_list\n\n\nif __name__ == \"__main__\":\n obj = IncrementSearchSpace()\n print(obj.generate_action_list())\n print(obj.get_search_space())\n", "import time\nimport torch\nimport numpy as np\nfrom collections import deque\nfrom graphnas.trainer import Trainer\n\n\nclass Evolution_Trainer(Trainer):\n \"\"\"\n This class implements the Asyncronous Aging Evolution,\n proposed by Real et. al. 
on:\n\n Regularized Evolution for Image Classifier Architecture Search\n\n available on: https://arxiv.org/abs/1802.01548\n \"\"\"\n def __init__(self, args):\n super(Evolution_Trainer, self).__init__(args)\n self.args = args\n self.random_seed = args.random_seed\n self.population = deque()\n self.accuracies = deque()\n self.population_size = args.population_size\n self.sample_size = args.sample_size\n self.cycles = args.cycles\n self.init_time = 0\n print('initializing population on evolution_trainer init, maybe not the best strategy')\n self.__initialize_population()\n\n def derive_from_population(self):\n population = self._construct_action(self.population)\n best_score_index, _ = \\\n self._get_best_individual_accuracy(self.accuracies)\n best_structure = self.form_gnn_info(population[best_score_index])\n print(\"[DERIVE] Best Structure:\", str(best_structure))\n # train from scratch to get the final score\n np.random.seed(self.random_seed)\n torch.manual_seed(self.random_seed)\n torch.cuda.manual_seed_all(self.random_seed)\n test_scores_list = []\n for i in range(10): # run 10 times to get Mean and Stddev\n val_acc, test_acc = self.submodel_manager.evaluate(best_structure)\n test_scores_list.append(test_acc)\n print(\"[DERIVE] Best Results: \", best_structure, \": \",\n np.mean(test_scores_list),\n \"+/-\", np.std(test_scores_list))\n\n def _mutate_individual(self, indiv):\n # Choose a random position on the individual to mutate\n position_to_mutate = np.random.randint(len(indiv))\n # This position will receive a randomly chosen index\n # of the search_spaces's list\n # for the action corresponding to that position in the individual\n sp_list = self.search_space[self.action_list[position_to_mutate]]\n indiv[position_to_mutate] = \\\n np.random.randint(0, len(sp_list))\n return indiv\n\n def _get_best_individual_accuracy(self, accs):\n max_acc_index = 0\n max_acc = -1\n for index, acc in enumerate(accs):\n if acc > max_acc:\n max_acc = acc\n max_acc_index = index\n return max_acc_index, max_acc\n\n def __initialize_population(self):\n print(\"\\n\\n===== Evaluating initial random population =====\")\n start_initial_population_time = time.time()\n while len(self.population) < self.population_size:\n # print('adding individual #:', len(population))\n individual = self._generate_random_individual()\n ind_actions = self._construct_action([individual])\n gnn = self.form_gnn_info(ind_actions[0])\n _, ind_acc = \\\n self.submodel_manager.train(gnn, format=self.args.format)\n print(\"individual:\", individual, \" val_score:\", ind_acc)\n self.accuracies.append(ind_acc)\n self.population.append(individual)\n end_initial_pop_time = time.time()\n self.init_time = end_initial_pop_time - start_initial_population_time\n print(\"Time elapsed initializing population: \" +\n str(self.init_time))\n print(\"===== Evaluating initial random population DONE ====\")\n\n def train(self):\n print(\"\\n\\n===== Evolution ====\")\n start_evolution_time = time.time()\n while self.cycles > 0:\n sample = [] # list with indexes to population individuals\n sample_accs = [] # accuracies of the sampled individuals\n while len(sample) < self.sample_size:\n candidate = np.random.randint(0, len(self.population))\n sample.append(self.population[candidate])\n sample_accs.append(self.accuracies[candidate])\n\n # Get best individual on sample to serve as parent\n max_sample_acc_index, max_sample_acc = \\\n self._get_best_individual_accuracy(sample_accs)\n parent = sample[max_sample_acc_index]\n # print('parent: ', parent)\n 
child = parent.copy()\n child = self._mutate_individual(child)\n # print('child: ', child)\n child_actions = self._construct_action([child])\n gnn = self.form_gnn_info(child_actions[0])\n _, child_acc = \\\n self.submodel_manager.train(gnn, format=self.args.format)\n # print('child acc: ', child_acc)\n print(\"parent: \", str(parent), \" val_score: \", str(max_sample_acc),\n \"| child: \", str(child), \", val_score: \", str(child_acc))\n self.accuracies.append(child_acc)\n self.population.append(child)\n if self.cycles % self.args.eval_cycle == 0:\n self.derive_from_population()\n # Remove oldest individual (Aging/Regularized evolution)\n self.population.popleft()\n self.accuracies.popleft()\n print(\"[POPULATION STATS] Mean/Median/Best: \",\n np.mean(self.accuracies),\n np.median(self.accuracies),\n np.max(self.accuracies))\n self.cycles -= 1\n end_evolution_time = time.time()\n total_evolution_time = end_evolution_time - start_evolution_time\n print('Time spent on evolution: ' +\n str(total_evolution_time))\n print('Total elapsed time: ' +\n str(total_evolution_time + self.init_time))\n print(\"===== Evolution DONE ====\")\n\n def derive(self, sample_num=None):\n self.derive_from_population()\n" ]
[ [ "torch.nn.Linear" ], [ "numpy.random.seed", "torch.manual_seed", "numpy.median", "numpy.max", "numpy.std", "numpy.mean", "torch.cuda.manual_seed_all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NREL/PVwindow
[ "df7091c9d1ebd280aca53c50015e3b1ee7a3183e", "df7091c9d1ebd280aca53c50015e3b1ee7a3183e" ]
[ "tmmPCECalc.py", "FitAngular.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 4 12:29:21 2021\n\n@author: aduell\n\"\"\"\n\n\n#import numpy as np\nfrom numpy import pi, linspace, array, exp\n#import tmm\nfrom tmm import inc_tmm, inc_absorp_in_each_layer, inf\n#import pandas as pd\n#import tmm_vw as tmm\n#import matplotlib.pyplot as plt\nfrom matplotlib.pyplot import plot,figure,xlabel,ylabel,show,ylim,legend\nfrom wpv import Layer, Stack\n#import scipy.interpolate, scipy.integrate, pandas, sys\nfrom scipy.interpolate import interp1d\nfrom scipy.integrate import quad, trapz\nfrom scipy.optimize import fsolve#, Bounds\nimport scipy.optimize\nfrom pandas import read_excel\nimport sys\n#import scipy\n#from numericalunits import W, K, nm, m, cm, s, eV, meV, V, mA, c0, hPlanck, kB, e, A, ohm\n#import sympy\n#import sympy.solvers.solvers\nassert sys.version_info >= (3,6), 'Requires Python 3.6+'\nfrom pvlib.pvsystem import singlediode\nimport tmmPVColor as pvc\nimport CalculateVLTFromSpectrum as cvs\nfrom CalculateVLTFromSpectrum import AM15G, cieplf\nimport vegas\n\n\n\n# This whole thing uses microns for length\n\n'''We determine the incident angle of the sun shining on the cell. Input is in degrees'''\ndef giveincangle(angle):\n degree = pi/180\n return angle*degree\ninc_angle = giveincangle(0) \n'''We determine the size and scaling of the photon wavelength scale. Units are um''' \nnum_lams = 500\nlams = linspace(0.3,2.5,num=num_lams) #um\n\n'''We are constants and help control units'''\nq = 1.602176634e-19 #coulombs. elementary charge \nc0 = 299792458 #m/s #Speed of light\nhPlanck = 6.62607015e-34 #J*s 4.135667516e-15 #eV*s \nkB = 1.380649e-23 #J/K 8.61733034e-5 #eV/K \n\n'''Some units and terms'''\n'''Tcell, Ti, To are cell temperature, inside temp and outside temp. Always in kelvin'''\n'''Ui and Uo are overall heat-transfer coefficient ofr in side and outside. W/(m**2 *K)'''\n'''AbsorberLayer is a number indicating the photoactive layer. If the fourth layer is the PV layer, input is 4'''\n''''Rs is series resistance, Rsh is shunt resistance in ohms. See pveducation.org for more info''' \n'''eta is the electron-hole pair extraction efficiency term. eta times all absorbed light in the PV layer gives the EQE'''\n'''n = diode ideality factor. Used in singlediode equation\nNs = number of cells in series. 
Used in singlediode equation'''\n'''Rtot is total thermal resistance of the window'''\n\n\n\n\n\n'''We are all the different materials currently available\nThickness is in microns'''\ndef Glass(Thickness = 6000):\n return Layer(Thickness,'nkLowFeGlass','i')\ndef TiO2(Thickness = 0.050):\n return Layer(Thickness,'nkTiO2','c')\ndef FTO(Thickness = 0.250):\n return Layer(Thickness,'nkFTO','c')\ndef MAPI(Thickness = 0.130): \n return Layer(Thickness,'nkMAPI','c')\ndef AZO(Thickness = 0.200):\n return Layer(Thickness,'nkAZO','c')\ndef ITO(Thickness = 0.200):\n return Layer(Thickness,'nkITO','c')\ndef ITOlowE(Thickness = 0.075):\n return Layer(Thickness,'nkITO','c')\ndef SnO2(Thickness = 0.05):\n return Layer(Thickness,'nkSnO2','c')\ndef SnO2lowE(Thickness = 0.030):\n return Layer(Thickness,'nkSnO2','c')\ndef SnO2lowEfat(Thickness = 0.050):\n return Layer(Thickness,'nkSnO2','c')\ndef SiO2(Thickness = 0.024):\n return Layer(Thickness,'nkSiO2','c')\ndef NiO(Thickness = 0.050):\n return Layer(Thickness,'nkNiO','c')\ndef Ag(Thickness = 0.015):\n return Layer(Thickness,'nkAg','c')\ndef TiO2lowE(Thickness = 0.030):\n return Layer(Thickness,'nkTiO2','c')\ndef TiO2lowEfat(Thickness = 0.060):\n return Layer(Thickness,'nkTiO2','c')\ndef Bleach(Thickness = 0.370):\n return Layer(Thickness,'nkBleach','c')\ndef ClAlPc(Thickness = 0.300):\n return Layer(Thickness,'nkClAlPc','c')\ndef C60(Thickness = 0.200):\n return Layer(Thickness,'nkC60','c')\ndef IR(Thickness = 0.060):\n return Layer(Thickness,'nkPTB7_ThIEICO_4F','c')\ndef MAPBr(Thickness = 0.500):\n return Layer(Thickness,'nkMAPbBr3','c')\ndef EVA(Thickness = 3000):\n return Layer(Thickness,'nkEVA','i')\n\n\n'''We are boundary conditions corresponding to each material type\nCan be changed to tune optimization range'''\nGlassBound = (5999,6001)\nTiO2Bound = (0.025,.1)\nFTOBound = (0.1,0.5)\nMAPIBound = (.06,.260)\nAZOBound = (.1,.4)\nITOBound = (.1,.4)\nITOlowEBound = (0.03,.15)\nSnO2Bound = (.025,.1)\nSnO2lowEBound = (.015,.06)\nSnO2lowEfatBound = (0.025,.1)\nSiO2Bound = (.012,.05)\nNiOBound = (.025,.1)\nAgBound = (.0149, .0151)\nTiO2lowEBound = (.015, .070)\nTiO2lowEfatBound = (.03,.12)\nBleachBound = (.180, .500)\nClAlPcBound = (.150, .600)\nC60Bound = (.100,.400)\nIRBound = (.030, .12)\nMAPBrBound = (.250,1)\nEVABound = (2999,3001)\n\n\n'''I assemble a list of layer objects using Thicknesses and Materials''' \ndef GiveLayers(Thickness,Materials):\n x = len(Materials)\n if x == len(Thickness):\n Layers = []\n for i in range(x):\n Layers.append(Materials[i](Thickness[i]))\n return Layers\n else: \n raise ValueError ('layers and Thickness lengths do not match')\n\n'''I give a list of boundaries from a list of materials. 
Dict is a dictionary containing the boundary conditions\nAll items in the dicitonary are labelled as 'Material'+'Bound' '''\n'''\ndef GiveBounds(Materials, DictBound):\n x = len(Materials)\n Bounds = []\n for i in range(x):\n Bounds.append(DictBound[Materials[i].__name__ + 'Bound'])\n Bounds = array(Bounds)\n return Bounds\n'''\n\n'''I produce a Bounds object that defines the boundary conditions for optimization\nThe version above can be used to produce a list of bounds rather than an object'''\n\ndef GiveBounds(Materials, DictBound):\n x = len(Materials)\n lb = []\n ub = []\n for i in range(x):\n lb.append(DictBound[Materials[i].__name__ + 'Bound'][0])\n for i in range(x):\n ub.append(DictBound[Materials[i].__name__ + 'Bound'][1])\n bounds = scipy.optimize.Bounds(lb,ub)\n return bounds\n\n'''I give a list of thicknesses from a list of materials. Dict is a dictionary containing the thickness values\nAll items in the dicitonary are labelled as 'Material'+'Th' '''\ndef GiveThicks(Materials, DictTh):\n x = len(Materials)\n Th = []\n for i in range(x):\n Th.append(DictTh[Materials[i].__name__ + 'Th'])\n return Th\n\n'''Calculates Spectra Based on the layers of the cell\nAbsorberLayer is an integer giving the position of the PV layer in the stack. Currently supports 1 PV layer'''\ndef Spectra(layers, AbsorberLayer):\n thicks = [inf]\n iorcs = ['i']\n for layer in layers:\n thicks.append(layer.d)\n iorcs.append(layer.i_or_c)\n thicks.append(inf)\n iorcs.append('i')\n \n \n thicks_bw = thicks[::-1]\n iorcs_bw = iorcs[::-1]\n\n Ts = []\n Rfs = []\n Rbs = []\n AbsByAbsorbers = []\n #EQEs2 = []\n #IREQEs = []\n\n\n layerchoice = AbsorberLayer \n #layerchoice2 = 5\n\n for lam in lams:\n\n nks = [1]\n for layer in layers:\n nks.append(layer.nk(lam))\n nks.append(1)\n \n nks_bw = nks[::-1]\n \n front_spol = inc_tmm('s',nks,thicks,iorcs,inc_angle,lam)\n front_ppol = inc_tmm('p',nks,thicks,iorcs,inc_angle,lam)\n back_spol = inc_tmm('s',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)\n back_ppol = inc_tmm('p',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)\n \n AbsByAbsorber_spol = inc_absorp_in_each_layer(front_spol)[layerchoice]\n AbsByAbsorber_ppol = inc_absorp_in_each_layer(front_ppol)[layerchoice]\n \n AbsByAbsorbers.append( (AbsByAbsorber_spol + AbsByAbsorber_ppol) / 2. )\n \n # EQE_spol2 = tmm.inc_absorp_in_each_layer(front_spol)[layerchoice2]\n # EQE_ppol2 = tmm.inc_absorp_in_each_layer(front_ppol)[layerchoice2]\n \n # EQEs2.append( (EQE_spol2 + EQE_ppol2) / 2. )\n \n Rfs.append( (front_spol['R']+front_ppol['R']) / 2.)\n Rbs.append( (back_spol['R']+back_ppol['R']) / 2.)\n Ts.append( (front_spol['T']+front_ppol['T']) / 2. )\n\n\n Ts = array(Ts)\n Rfs = array(Rfs)\n Rbs = array(Rbs)\n As = 1-Ts-Rfs\n sanities = Ts+Rfs+As\n\n AbsByAbsorbers = array(AbsByAbsorbers)\n Spectra = {'AbsByAbsorbers':AbsByAbsorbers, 'Ts':Ts,'Rfs':Rfs,'Rbs':Rbs,'As':As,'Total':sanities}\n return Spectra\n\n\n\n\n''' Here I calculate VLT and spit it out to the screen'''\n\n'''Gives a spectrum of VLT. Used for plotting'''\ndef VLTSpectrum(layers):\n return Stack(layers)\n'''Gives VLT as a single number'''\ndef VLT(layers):\n VLTstack=Stack(layers)\n return VLTstack.get_visible_light_transmission(lams,inc_angle)\n\n'''This gives VLT as a single number. eliminates\nneed to recalculate AM15G and cieplf every iteration. 
Unclear if this will work for \noptimization'''\ndef getFancyVLT(layers):#,lamrange,inc_angle):\n integ = vegas.Integrator([lams])\n Trans=Stack(layers)\n numerator = integ(lambda lam: AM15G(lam)*cieplf(lam)*Trans.get_RAT(lam,inc_angle)[2], nitn=10, neval=100)[0]\n denominator = integ(lambda lam: AM15G(lam)*cieplf(lam), nitn=10, neval=100)[0]\n VLT = numerator/denominator\n return VLT.mean\n\n'''Gives minimum and maximum VLT based exclusively on the PV layer. \nOnly useful for judging VLT constraint for a given PV material\nRequires input of single absorber layer with a tuple of (lb,ub)'''\ndef GiveMinMaxVLT(AbsorberType, Bounds):\n minThick = GiveLayers([Bounds[0]], [AbsorberType]) \n maxThick = GiveLayers([Bounds[1]], [AbsorberType])\n minimum = VLT(maxThick)\n maximum = VLT(minThick)\n return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],\n 'maxThick':Bounds[1]}\n\n'''Gives minimum and maximum VLT based exclusively on the PV layer. \nRequires list of materials, absorbing layer, and absorber bounds'''\ndef GiveMinMaxVLTFromMaterials(Materials, AbsorberLayer, Bounds):\n AbsorberType = Materials[AbsorberLayer-1]\n minThick = GiveLayers([Bounds[0]], [AbsorberType]) \n maxThick = GiveLayers([Bounds[1]], [AbsorberType])\n minimum = VLT(maxThick)\n maximum = VLT(minThick)\n return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],\n 'maxThick':Bounds[1]}\n\n\n\n# ******************** Here I add PCE calculation *********************#\n \n'''This stuff imports a spreadsheet of the solar spectrum'''\n#worksheet = pandas.read_excel('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')\nworksheet = read_excel('./Data/ASTMG173.xls')#('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')\n#worksheet = pandas.read_excel('/Users/lwheeler/Code/pv-window-bem/Data/astmg173.xls')\ndownloaded_array = array(worksheet)\n\n# Wavelength is in column 0, AM1.5G data is column 2\nAM15 = downloaded_array[1:, [0,2]]\n\n# The first line should be 280.0 , 4.7309E-23\n# The last line should be 4000.0, 7.1043E-03\n# print(AM15)\n\n# Interpolate to get a continuous function which I will be able to do integrals on:\n'''Interpolated solar spectrum\nwhen using, inputs must be within 300-2500 nm'''\nAM15interp = interp1d(AM15[:,0]/1000, AM15[:,1])\n\n\n# Here’s the plot, it looks correct:\n'''Plot of the solar spectrum for verification'''\n'''\ny_values = np.array([AM15interp(x) for x in lams])\nfigure()\nplot(lams , y_values)\nxlabel(\"Wavelength (nm)\")\nylabel(\"Spectral intensity (W/m$^2$/nm)\")\ntitle(\"Light from the sun\");\nshow()\n'''\n\n\n'''I convert wavelength to energy. E_min and max are used for integration limits '''\nEphoton = hPlanck * c0 / lams *1e6 #J\nE_min = min(Ephoton) #J energy units from hPlanck\nE_max = max(Ephoton) #J energy units from hPlanck\n\n\n'''I give the number of photons per......'''\ndef SPhotonsPerTEA(Ephoton):\n λ = hPlanck * c0 / Ephoton *1e6 #um\n return AM15interp(λ) * (1 / Ephoton) * (hPlanck * c0 / Ephoton**2) * 1e9\n'''I give the power for each......'''\ndef PowerPerTEA(Ephoton):\n return Ephoton * SPhotonsPerTEA(Ephoton)\n'''I give the solar constant which is the W/m*2 emitted by the sun. 
Should be ~1000'''\ndef Solar_Constant(Ephoton):\n #PowerPerTEA = lambda E : E * SPhotonsPerTEA(E)\n return quad(PowerPerTEA,E_min,E_max, full_output=1)[0]\n# quad() is ordinary integration; full_output=1 is (surprisingly) how you hide\n# the messages warning about poor accuracy in integrating.\n'''This is the solar constant value. It is called by optimization and used in a variety of functions here\nShould always be ~1000'''\nsolar_constant = Solar_Constant(Ephoton)\n\n'''I return an interpolated function of a spectrum relative to photon wavelength. Used for plotting'''\ndef GivelamsInterp(Parameter):\n Curve = Parameter.round(8)\n return interp1d(lams, Curve)\n\n'''I return an interpolated function of a spectrum relative to photon energy'''\ndef GiveEInterp(Parameter):\n Curve = Parameter.round(8)\n return interp1d(Ephoton, Curve)\n\n'''I give Q based on a given spectrum. Units are W/m^2\nInput is a spectrum interpolated with respect to energy, E\neta should only be used if looking at a PV layer. Otherwise it is set to 1'''\ndef GiveQ(Spectra, eta = 1):#Spectra must be an interpolated function\n def integrand(E):\n return eta * Spectra(E) * PowerPerTEA(E)\n return quad(integrand, E_min, E_max, full_output=1)[0] \n \n'''\n#trapz calcs\ndef GiveQ(Spectra, eta = 1):#Spectra must be an array\n integrand = eta*Spectra*PowerPerTEA(Ephoton)\n return -np.trapz(integrand, Ephoton) \n'''\n\n'''\ndef GivePhotons(Spectra, eta):#Spectra must be an interpolated function\n def integrand(E):\n return eta * Spectra(E) * SPhotonsPerTEA(E)\n return quad(integrand, E_min, E_max)[0] \n'''\n# Here I input the spectrum of photons absorbed by the absorber material (Absorbed)\n# and the electron-hole pair extraction efficiency (eta). EQE = eta * Absorbed\n\n'''I give the rate of recombination for the solar cell, Units are photons/(s*m**2)'''\ndef RR0(eta,Absorbed,Tcell):\n integrand = lambda E : eta * Absorbed(E) * (E)**2 / (exp(E / (kB * Tcell)) - 1)\n integral = quad(integrand, E_min, E_max, full_output=1)[0]\n return ((2 * pi) / (c0**2 * hPlanck**3)) * integral# / 1.60218e-19 #J/eV\n#units = photons/(s*m**2)\n\n'''I give the amount of energy converted to electricity in terms of photons, units are photons(s/m**2)'''\ndef Generated(eta,Absorbed):\n integrand = lambda E : eta * Absorbed(E) * SPhotonsPerTEA(E)\n# integral = quad(integrand, E_min, E_max, full_output=1)[0]\n return quad(integrand, E_min, E_max, full_output=1)[0]\n#units photons/(s*m**2)\n'''\n#Using trapezoidal rule for integration instaed of quad\n#AbsByAbsorbers is an aray of intensities, not an interpolated function.\ndef RR0(eta,Absorbed,Tcell):\n AbsByAbsorbers = AbsByAbsorbers.round(8)\n integrand = eta * AbsByAbsorbers * (Ephoton)**2 / (np.exp(Ephoton / (kB * Tcell)) - 1)\n integral = trapz(integrand, Ephoton)\n return ((2 * np.pi) / (c0**2 * hPlanck**3)) * integral\n\ndef Generated(eta,Absorbed):\n Absorbed = Absorbed.round(8)\n integrand = eta * Absorbed * SPhotonsPerTEA(Ephoton)\n# integral = quad(integrand, E_min, E_max, full_output=1)[0]\n return np.trapz(integrand, Ephoton)\n'''\n\n'''I use the single diode equation to return the max power of the cell in watts\nCheck PVlib documentation for details'''\ndef Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell, n = 1, Ns = 1):\n data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed,Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)\n return data['p_mp']\n\n'''I calculate equilibrium tmperature of the cell assuming the cell is infinitely thin\nTotalAbs is the full absorptance of the 
stack as an array of intensities, uninterpolated. \nAbsorbed is PV layer absorptance interpolated\nTemperature calculation is implicit so the numerical solver fsolve is used.\nThis equation is derived from Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows'''\ndef TcellCalc(TotalAbs, eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh):\n AbsTotal = GiveEInterp(TotalAbs)\n Qabs = GiveQ(AbsTotal)\n Temp = lambda Tcell: (Qabs - Give_Pmp(eta,Absorbed,Rs,Rsh, Tcell) + Ui*Ti + Uo*To)/(Ui + Uo)-Tcell\n return fsolve(Temp, 300)[0]\n\n\n'''I use the single diode equation to produce an IV curve and power plot\nI also return related values such as Voc, Isc, and Pmp in units volts, amps, and watts\nSee pvlib singlediode equation for more information'''\ndef GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1):\n data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed, Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)\n\n Isc = data['i_sc']\n Voc = data['v_oc']\n Imp = data['i_mp']\n Vmp = data['v_mp']\n Pmp = data['p_mp']\n Vvalues = array(data['v'])\n Ivalues = array(data['i'])\n #print('Isc = ', Isc, ', Voc = ', Voc, ', Imp = ', Imp, ', Vmp = ', Vmp, ', Pmp =', Pmp)\n\n figure()\n plot(Vvalues,Ivalues, label = 'IV')\n xlabel('Voltage, (V)')\n ylabel('Current (A) or Power (W/m^2)')\n ylabel('Power (W/m^2)')\n P_values = array([Ivalues * Vvalues])\n plot(Vvalues , P_values.T, label = 'Power')\n ylim(-1, 150)\n legend(loc = 'upper right')\n show()\n return data\n\n\n\n'''I give the solar heat gain coefficient. unitless numebr between 0 and 1\nTs is the transmission spectra. Must be a list of intensities, not an interpolated function\nThis equation comes form a combination of Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows\nand equation 3.18 from Fundamentals of Heat and Mass Transfer 6ed Incropera'''\ndef SHGC(Ts, Ti, To, Tcell, Ui):\n #Tcell = TcellCalc(As,Ti,To,eta,Absorbed)\n Rtot = 1/Ui #This is approximate because Ui is assumed\n #Included in GiveQ for simplicity but should not be used for calculating SHGC\n TransTotal = GiveEInterp(Ts)\n Qtrans = GiveQ(TransTotal,1)\n return (Qtrans + Ui*(Tcell-Ti) - ((To-Ti)/Rtot))/solar_constant\n\n'''I give max efficiency also called PCE'''\n'''Absorbed must be an interpolated function of the absorption spectrum of the PV layer'''\ndef max_efficiency(eta,Absorbed,Tcell, Rs, Rsh):\n #Tcell = TcellCalc(As,Ti,To,eta,Absorbed)\n return Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell) / solar_constant\n\n'''I give important info about a solar cell such as PCE, SHGC, Temperature, etc'''\ndef GiveImportantInfo(Thickness, Materials,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle=0):\n global inc_angle\n inc_angle = giveincangle(Angle)\n \n layers = GiveLayers(Thickness,Materials)\n \n spectra = Spectra(layers ,AbsorberLayer)\n AbsByAbsorbers = spectra['AbsByAbsorbers']\n Ts = spectra['Ts']\n Rfs = spectra['Rfs']\n Rbs = spectra['Rbs']\n As = spectra['As']\n sanities = spectra['Total']\n Absorbed = GiveEInterp(AbsByAbsorbers)\n VLTcalc = cvs.getVLT(Ts,lams)#VLT(layers)\n Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)\n #Absorbed = tpc.GiveEInterp(tpc.Spectra(tpc.GiveLayers(Thickness, Materials),4)['AbsByAbsorbers'])\n data = GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1)\n Isc = data['i_sc']\n Voc = data['v_oc']\n Imp = data['i_mp']\n Vmp = data['v_mp']\n Pmp = data['p_mp']\n SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)\n PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)\n\n\n #Spectral Curves\n figure()\n 
plot(lams,Rfs,color='magenta',marker=None,label=\"$R_f$\")\n plot(lams,Ts,color='green',marker=None,label=\"$T$\")\n plot(lams,Rbs,color='purple',marker=None,label=\"$R_b$\")\n plot(lams,As,color='black',marker=None,label=\"A\")\n plot(lams,AbsByAbsorbers,color='black',linestyle='--',marker=None,label=\"AbsByAbsorber\")\n plot(lams,sanities,color='gold',marker=None,label=\"R+A+T\")\n plot(lams,VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label=\"photopic\")\n xlabel('wavelength, $\\mu$m')\n ylabel('Intensity')\n legend(loc = 'upper right')\n show()\n \n EphotoneV = Ephoton*6.241509e+18 \n figure()\n plot(EphotoneV, Ts, color='magenta',marker=None,label=\"$T$\")\n plot(EphotoneV, Rfs,color='green',marker=None,label=\"$R_f$\")\n plot(EphotoneV, Rbs,color='orange',marker=None,label=\"$R_b$\")\n plot(EphotoneV, AbsByAbsorbers,color='black',marker=None,label=\"Abs\")\n #plot(Ephoton,tpc.VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label=\"photopic\")\n legend(loc = 'upper right')\n xlabel('Energy, eV')\n ylabel('Intensity')\n show()\n\n pvc.GiveColorSwatch(Ts, Rfs)\n pvc.plot_xy_on_fin(Ts, Rfs)\n\n print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)#,'time to calculate PCE from scratch in seconds = ', TimePCE, 'Time to run optimizer in minutes = ',TimeOptimize/60)\n return {'PCE':PCE, 'VLT':VLTcalc, 'SHGC':SHGCcalc, 'Tcell':Tcell,'Isc':Isc, 'Voc': Voc, 'Imp': Imp, 'Vmp': Vmp,'Pmp': Pmp}\n", "\"\"\"\nThis thing should find the reflectance and absorbtance as a function of \nangle of incidence. Then it fits these functions to a 4th order polynomial\nbecause that's what EnergyPlus does for some reason.\n\"\"\"\n\nimport numpy as np\nfrom wpv import Layer,Stack\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n# This whole thing uses microns for length\n\ndegree = np.pi/180\ninc_angles = np.linspace(0,89,num=20)*degree\n\n''' \nnum_lams = 500\nlams = np.linspace(0.3,2.5,num=num_lams)\nlamrange = [min(lams),max(lams)]\n'''\n\nlam = 0.55\n\nGlass = Layer(4000,'nkLowFeGlass','i')\nTiO2 = Layer(0.05,'nkTiO2','c')\nFTO = Layer(0.3,'nkFTO','c')\nMAPI = Layer(0.5,'nkMAPI','c')\nITO = Layer(0.4,'nkITO','c')\nSnO2 = Layer(0.5,'nkSnO2','c')\nNiO = Layer(0.05,'nkNiO','c')\nAg = Layer(0.01,'nkAg','c')\nTiO2lowE = Layer(0.02,'nkTiO2','c')\nBleach = Layer(0.5,'nkTiO2','c')\nEVA = Layer(1500,'nkEVA','i')\n\n\n#MAPI.plotnk(lams)\n\n\nlayers = [Glass,FTO,TiO2,Bleach,NiO,ITO,EVA,Glass,TiO2lowE,Ag,TiO2lowE]\n#layers = [MAPI]\n\nstack = Stack(layers)\n\nRs=[]\nAs=[]\nTs=[]\nfor iang in inc_angles:\n [R,A,T] = stack.get_RAT(lam,iang)\n Rs.append(R)\n As.append(A)\n Ts.append(T) \n\n'''\nfor iang in inc_angles:\n Rs = []\n Ts = []\n for lam in lams:\n [R,A,T] = stack.get_RAT(lam,iang)\n Rs.append(R)\n Ts.append(T)\n''' \nRs = np.array(Rs)\nAs = np.array(As)\nTs = np.array(Ts)\n\ntaus = Ts/Ts[0]\n\ndef taubar(theta,t0,t1,t2,t3,t4):\n return t0+t1*np.cos(theta)+t2*np.cos(theta)**2+t3*np.cos(theta)**3+t4*np.cos(theta)**4\n\n \npopt, pcov = curve_fit(taubar, inc_angles, taus)\n\n\nplt.figure()\nplt.plot(inc_angles/degree,Rs,label=\"$R$\")\nplt.plot(inc_angles/degree,As,label=\"$A$\")\nplt.plot(inc_angles/degree,Ts,label=\"$T$\")\nplt.plot(inc_angles/degree, Ts[0]*taubar(inc_angles, *popt),label=\"$T_{fitted}$\")\nplt.plot(inc_angles/degree,Rs+As+Ts,label=\"$R+A+T$\")\nplt.xlabel(r\"$\\theta$\")\nplt.legend()\nplt.show()\n\nprint(taus)\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_excel", "scipy.optimize.fsolve", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "numpy.exp", "matplotlib.pyplot.ylabel", "scipy.interpolate.interp1d", "scipy.integrate.quad", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.legend", "numpy.linspace", "numpy.cos", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.array", "scipy.optimize.curve_fit", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
zelhar/mg21
[ "f8392aba7deb63aa85f3d137ef81dea1bb742b41" ]
[ "demoNN.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset, TensorDataset\nfrom torchvision import datasets\nfrom torchvision.transforms import ToTensor, Lambda, Compose\nimport matplotlib.pyplot as plt\n\nimport torch.distributions as D\n\nimport torch.nn.functional as F\n\n# Download training data from open datasets.\ntraining_data = datasets.FashionMNIST(\n root=\"data\",\n train=True,\n download=True,\n transform=ToTensor(),\n)\n\n# Download test data from open datasets.\ntest_data = datasets.FashionMNIST(\n root=\"data\",\n train=False,\n download=True,\n transform=ToTensor(),\n)\n\nbatch_size = 64\n\n# Create data loaders.\ntrain_dataloader = DataLoader(training_data, batch_size=batch_size)\ntest_dataloader = DataLoader(test_data, batch_size=batch_size)\n\nfor X, y in test_dataloader:\n print(\"Shape of X [N, C, H, W]: \", X.shape)\n print(\"Shape of y: \", y.shape, y.dtype)\n break\n\n# testing synthetic dataset\nx = torch.randn((100,3,28,28))\n\nd = TensorDataset(x)\n\nz = d.__getitem__(2) # retuns 1-tuple of tensor (no label) \nz[0].shape\n\n# with labels\ny = torch.randint(low=0, high=1, size=(100,))\nd = TensorDataset(x,y)\nz = d.__getitem__(2) # retuns 1-tuple of tensor (no label) \nz[0].shape\nz[1].shape\n\n# Get cpu or gpu device for training.\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(\"Using {} device\".format(device))\n\n# Define model\nclass NeuralNetwork(nn.Module):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(28*28, 512),\n nn.ReLU(),\n nn.Linear(512, 512),\n nn.ReLU(),\n nn.Linear(512, 10)\n )\n\n def forward(self, x):\n x = self.flatten(x)\n logits = self.linear_relu_stack(x)\n return logits\n\nmodel = NeuralNetwork().to(device)\nprint(model)\n\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n\ndef train(dataloader, model, loss_fn, optimizer):\n size = len(dataloader.dataset)\n model.train()\n for batch, (X, y) in enumerate(dataloader):\n X, y = X.to(device), y.to(device)\n\n # Compute prediction error\n pred = model(X)\n loss = loss_fn(pred, y)\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch % 100 == 0:\n loss, current = loss.item(), batch * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n\ndef test(dataloader, model, loss_fn):\n size = len(dataloader.dataset)\n num_batches = len(dataloader)\n model.eval()\n test_loss, correct = 0, 0\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.to(device), y.to(device)\n pred = model(X)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n test_loss /= num_batches\n correct /= size\n print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n\nepochs = 5\nfor t in range(epochs):\n print(f\"Epoch {t+1}\\n-------------------------------\")\n train(train_dataloader, model, loss_fn, optimizer)\n test(test_dataloader, model, loss_fn)\nprint(\"Done!\")\n\n\n\nbce = nn.BCELoss(reduction=\"none\")\nx = torch.tensor(0.5)\ny = torch.tensor(0.7)\nbce(x,y)\n\nf = lambda x, y: y * torch.log(x) + (1-y) * torch.log(1-x)\nf(x,y)\n\n\ntorch.softmax(torch.tensor([1,2,3]), 0, torch.float64)\n\n# generate mixed distributions\nm = D.OneHotCategorical(torch.tensor([1,2,3,6]))\nm.sample()\nm.sample_n(10)\nm.sample((3,4))\n\nm = D.Normal(torch.tensor([0,10.0]), 
torch.tensor([1.0,2]))\n\nm.sample((3,4))\n\n# Example of target with class indices\nloss = nn.CrossEntropyLoss()\n\ninput = torch.randn(3, 5, requires_grad=True)\ntarget = torch.empty(3, dtype=torch.long).random_(5)\noutput = loss(input, target)\noutput.backward()\n\n# Example of target with class probabilities\ninput = torch.randn(3, 5, requires_grad=True)\ntarget = torch.randn(3, 5).softmax(dim=1)\noutput = loss(input, target)\noutput.backward()\n\ninput = torch.randn((3, 2), requires_grad=True)\ntarget = torch.rand((3, 2), requires_grad=False)\nloss = F.binary_cross_entropy(F.sigmoid(input), target)\nloss.backward()\n\nloss = nn.BCELoss(reduction=\"none\")\nx = torch.tensor([0,0.25,0.5,0.75,1])\nF.binary_cross_entropy(x,x,reduction=\"none\")\nloss(x,x)\n\nx = torch.tensor([0,25,0.5,0.75,1])\ny = torch.tensor([0,0.25,0.5,0.75,1])\nloss(x,y)\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.randint", "torch.empty", "torch.randn", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "torch.nn.Flatten", "torch.nn.BCELoss", "torch.tensor", "torch.nn.functional.sigmoid", "torch.nn.Linear", "torch.nn.functional.binary_cross_entropy", "torch.rand", "torch.cuda.is_available", "torch.no_grad", "torch.log", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ajrcampbell/pyro
[ "37680e6d08f20cda95729427143f17875484b21d", "37680e6d08f20cda95729427143f17875484b21d" ]
[ "pyro/distributions/reflected.py", "tests/distributions/test_triangular.py" ]
[ "from torch.distributions import constraints\nfrom torch.distributions.transforms import AbsTransform\n\nfrom pyro.distributions.torch import TransformedDistribution\n\n\nclass ReflectedDistribution(TransformedDistribution):\n \"\"\"\n Equivalent to ``TransformedDistribution(base_dist, AbsTransform())``,\n but additionally supports :meth:`log_prob` .\n\n :param ~torch.distributions.Distribution base_dist: The distribution to\n reflect.\n \"\"\"\n support = constraints.positive\n\n def __init__(self, base_dist, validate_args=None):\n if base_dist.event_shape:\n raise ValueError(\"Only univariate distributions can be reflected.\")\n super().__init__(base_dist, AbsTransform(), validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(type(self), _instance)\n return super().expand(batch_shape, _instance=new)\n\n def log_prob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n dim = max(len(self.batch_shape), value.dim())\n plus_minus = value.new_tensor([1., -1.]).reshape((2,) + (1,) * dim)\n return self.base_dist.log_prob(plus_minus * value).logsumexp(0)\n", "import pytest\nimport torch\n\nimport pyro.distributions as dist\n\n\[email protected]('peak', [0.1, 0.3, 0.5, 0.7, 0.9])\ndef test_simulate_uniform(peak):\n n_samples = 10 ** 6\n x = torch.rand(len(peak))\n\n u = torch.FloatTensor(n_samples).uniform_()\n v = torch.FloatTensor(n_samples).uniform_()\n # From William E. Stein and Matthew F. Keblis\n # \"A new method to simulate the triangular distribution.\"\n # Mathematical and Computer Modelling, Volume 49, Issues 5–6, March 2009, Pages 1143-1147\n sim_triangular = peak + (u[:, None] - peak) * v[:, None].sqrt()\n sim_prob = (sim_triangular < x).sum(0) / (n_samples * 1.)\n\n triangular = dist.Triangular(0., 1., peak)\n prob = triangular.cdf(x)\n\n assert torch.all(torch.abs(prob - sim_prob) < 1e-3)\n" ]
[ [ "torch.distributions.transforms.AbsTransform" ], [ "torch.abs", "torch.FloatTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
psemdel/py-trading-bot
[ "69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019" ]
[ "bot/orders/models.py" ]
[ "from django.db import models\nfrom django.utils import timezone\nfrom django.db.models import Q\n\nimport asyncio\nfrom ib_insync import IB, Stock, MarketOrder, util\nfrom core.common import empty_append\nfrom core.indicators import rel_dif\n\nimport vectorbtpro as vbt\nimport sys\nimport math\n\nimport pandas as pd\nimport numpy as np\n\nfrom trading_bot.settings import (PERFORM_ORDER, USE_IB_FOR_DATA,DIC_PERFORM_ORDER,\n IB_LOCALHOST, IB_PORT)\n\n### Interactive brockers and data retrieval ###\n'''\nContains:\n- Communication with Interactive brokers\n- Retrieval of live data (Interactive brokers or YFinance)\n- Performing order\n- Models for financial products, stock exchanges...\n\nNote: for some reasons, it does not work if myIB class is not in models\n'''\n\n## All symbols must be from same stock exchange\ndef retrieve_data(symbols,period,**kwargs):\n try:\n IBok=True\n for symbol in symbols:\n if kwargs.get(\"index\",False):\n action=Index.objects.get(symbol=symbol)\n else:\n action=Action.objects.get(symbol=symbol)\n \n if action.stock_ex.ib_ticker in [\"BVME.ETF\"]:\n IBok=False\n break\n \n index_symbol=exchange_to_symbol(action)\n \n if (USE_IB_FOR_DATA and IBok) or kwargs.get(\"useIB\",False): \n fig= ''.join(x for x in period if x.isdigit())\n if period.find(\"d\")!=-1:\n period_ib=fig +\" D\"\n elif period.find(\"mo\")!=-1:\n period_ib=fig +\" M\"\n elif period.find(\"y\")!=-1:\n period_ib=fig +\" Y\" \n \n #Time period of one bar. Must be one of: ‘1 secs’, ‘5 secs’, ‘10 secs’ 15 secs’, ‘30 secs’, ‘1 min’, ‘2 mins’, ‘3 mins’, ‘5 mins’, ‘10 mins’, ‘15 mins’, ‘20 mins’, ‘30 mins’, ‘1 hour’, ‘2 hours’, ‘3 hours’, ‘4 hours’, ‘8 hours’, ‘1 day’, ‘1 week’, ‘1 month’.\n if kwargs.get(\"interval\",False):\n fig= ''.join(x for x in kwargs.get(\"interval\") if x.isdigit())\n if period.find(\"m\")!=-1:\n interval=fig +\" mins\"\n elif period.find(\"h\")!=-1:\n interval=fig +\" hours\"\n elif period.find(\"d\")!=-1:\n interval=fig +\" day\"\n else:\n interval='1 day'\n \n open_=[]\n close=[]\n low=[]\n high=[]\n \n myIB=MyIB()\n for symbol in symbols:\n action=Action.objects.get(symbol=symbol)\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n bars = myIB.ib.reqHistoricalData(\n contract,\n endDateTime='',\n durationStr=period_ib, #\"10 D\",\"1 M\"\n barSizeSetting=interval, #\"1 day\", \"1 min\"\n whatToShow='TRADES',\n useRTH=True,\n formatDate=1)\n \n df=util.df(bars)\n open_=empty_append(open_,df[\"open\"].values,axis=1)\n close=empty_append(close,df[\"close\"].values,axis=1)\n high=empty_append(high,df[\"high\"].values,axis=1)\n low=empty_append(low,df[\"low\"].values,axis=1)\n volume=empty_append(low,df[\"volume\"].values,axis=1)\n \n cours_open=pd.DataFrame(data=open_,index=df[\"date\"],columns=symbols)\n cours_close=pd.DataFrame(data=close,index=df[\"date\"],columns=symbols)\n cours_low=pd.DataFrame(data=low,index=df[\"date\"],columns=symbols)\n cours_high=pd.DataFrame(data=high,index=df[\"date\"],columns=symbols)\n cours_volume=pd.DataFrame(data=volume,index=df[\"date\"],columns=symbols)\n \n action=Action.objects.get(symbol=index_symbol)\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n bars = myIB.ib.reqHistoricalData(\n contract,\n endDateTime='',\n durationStr=period_ib, #\"10 D\",\"1 M\"\n barSizeSetting=interval, #\"1 day\", \"1 min\"\n whatToShow='TRADES',\n useRTH=True,\n formatDate=1)\n \n df=util.df(bars)\n cours_open_ind=df[\"open\"]\n cours_close_ind=df[\"close\"]\n 
cours_high_ind=df[\"high\"]\n cours_low_ind=df[\"low\"]\n cours_volume_ind=df[\"volume\"]\n #Volume\n \n if len(cours_close_ind)!=len(cours_close):\n print(\"cours index is different from cours length\")\n \n myIB.disconnect()\n else:\n all_symbols=symbols+[index_symbol]\n cours=vbt.YFData.fetch(all_symbols, period=period,missing_index='drop',**kwargs)\n cours_action=cours.select(symbols)\n cours_open =cours_action.get('Open')\n cours_high=cours_action.get('High')\n cours_low=cours_action.get('Low')\n cours_close=cours_action.get('Close')\n cours_volume=cours_action.get('Volume')\n print(\"number of days retrieved: \" + str(np.shape(cours_close)[0]))\n \n cours_index=cours.select(index_symbol)\n cours_open_ind =cours_index.get('Open')\n cours_high_ind=cours_index.get('High')\n cours_low_ind=cours_index.get('Low')\n cours_close_ind=cours_index.get('Close')\n cours_volume_ind=cours_index.get('Volume')\n\n debug=False\n if debug:\n for symbol in all_symbols:\n data=vbt.YFData.fetch(symbol, period=period,**kwargs)\n \n #knowing what we drop\n close_debug=data.get(\"Close\")\n for ii in range(len(close_debug)):\n if math.isnan(close_debug.values[ii]):\n print(symbol)\n print(\"dropping at least \" + str(close_debug.index[ii]))\n \n return cours_high, cours_low, cours_close, cours_open, cours_volume, \\\n cours_high_ind, cours_low_ind, cours_close_ind, cours_open_ind,\\\n cours_volume_ind\n \n except Exception as msg:\n print(msg)\n print(\"exception in \" + __name__)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n print(msg) \n\ndef exchange_to_symbol(action):\n if action.stock_ex.ib_ticker==\"SBF\":\n return \"^FCHI\"\n elif action.stock_ex.ib_ticker==\"IBIS\":\n return \"^GDAXI\"\n elif action.stock_ex.ib_ticker==\"NASDAQ\":\n return \"^IXIC\"\n elif action.stock_ex.ib_ticker==\"BVME.ETF\":\n return \"^IXIC\" #it is only ETF anyhow\n\ndef get_exchange_actions(exchange):\n cat=ActionCategory.objects.get(short=\"ACT\")\n stockEx=StockEx.objects.get(name=exchange)\n \n c1 = Q(category=cat)\n c2 = Q(stock_ex=stockEx)\n \n actions=Action.objects.filter(c1 & c2)\n return [ob.symbol for ob in actions]\n \ndef retrieve_ib_pf():\n myIB=MyIB()\n pf=[]\n pf_short=[]\n \n for pos in myIB.ib.positions():\n contract=pos.contract\n action=Action.objects.get(ib_ticker=contract.localSymbol)\n \n if pos.position>0:\n pf.append(action.symbol)\n else:\n pf_short.append(action.symbol)\n\n myIB.disconnect()\n return pf, pf_short\n\n#for SL check\ndef get_last_price(symbol,**kwargs):\n try:\n if kwargs.get(\"index\",False):\n action=Index.objects.get(symbol=symbol)\n else:\n action=Action.objects.get(symbol=symbol) \n\n if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in [\"BVME.ETF\"]:\n myIB=MyIB()\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n cours_pres=myIB.get_last_price(contract)\n myIB.disconnect()\n else: #YF\n cours=vbt.YFData.fetch([symbol], period=\"2d\")\n cours_close=cours.get(\"Close\")\n cours_pres=cours_close[symbol].iloc[-1]\n \n return cours_pres\n except Exception as msg:\n print(symbol)\n print(\"exception in \" + __name__)\n print(msg)\n\ndef get_ratio(symbol,**kwargs):\n try:\n if kwargs.get(\"index\",False):\n action=Index.objects.get(symbol=symbol)\n else:\n action=Action.objects.get(symbol=symbol)\n \n if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in [\"BVME.ETF\"]:\n myIB=MyIB()\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n 
cours_pres=myIB.get_last_price(contract)\n cours_ref, cours_open=myIB.get_past_closing_price(contract)\n \n if kwargs.get(\"opening\",False):\n cours_pres=cours_open\n \n myIB.disconnect()\n else: #YF\n cours=vbt.YFData.fetch([symbol], period=\"2d\")\n cours_close=cours.get(\"Close\")\n\n cours_ref=cours_close[symbol].iloc[0]\n \n if kwargs.get(\"opening\",False):\n cours_open=cours.get(\"Open\")\n cours_pres=cours_open[symbol].iloc[-1]\n else:\n cours_pres=cours_close[symbol].iloc[-1]\n\n return rel_dif(cours_pres,\n cours_ref\n )*100\n except Exception as msg:\n print(symbol)\n print(\"exception in \" + __name__)\n print(msg)\n\nclass MyIB():\n def __init__(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self.ib = IB()\n self.ib.connect(host=IB_LOCALHOST, port=IB_PORT, clientId=1)\n \n def cash_balance(self):\n try:\n for v in self.ib.accountSummary():\n if v.tag == 'CashBalance':\n return float(v.value)\n except:\n return 0\n \n def test(self,symbol):\n action=Action.objects.get(symbol=symbol)\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n print(self.ib.qualifyContracts(contract)) \n \n def retrieve(self,contract,period):\n \n bars = self.ib.reqHistoricalData(\n contract,\n endDateTime='',\n durationStr=period, #\"10 D\",\"1 M\"\n barSizeSetting='1 hour', #\"1 day\", \"1 min\"\n whatToShow='TRADES',\n useRTH=True,\n formatDate=1)\n\n return util.df(bars)\n \n def get_last_price(self,contract):\n m_data = self.ib.reqMktData(contract)\n while m_data.last != m_data.last: #Wait until data is in. \n self.ib.sleep(0.01)\n self.ib.cancelMktData(contract)\n return m_data.last\n \n def get_past_closing_price(self,contract):\n period=\"2 D\"\n bars = self.ib.reqHistoricalData(\n contract,\n endDateTime='',\n durationStr=period, #\"10 D\",\"1 M\"\n barSizeSetting='1 day', #\"1 day\", \"1 min\"\n whatToShow='TRADES',\n useRTH=True,\n formatDate=1)\n df=util.df(bars)\n return df.iloc[0][\"close\"], df.iloc[-1][\"open\"]\n \n def place(self,buy,ticker,currency,exchange,**kwargs): #quantity in euros\n if ticker==\"AAA\":\n print(\"ticker not found\")\n return \"\", 0\n else:\n contract = Stock(ticker, exchange, currency)\n self.ib.qualifyContracts(contract)\n \n if buy:\n order_size=kwargs.get(\"order_size\",0)\n last_price=self.get_last_price(contract)\n quantity=math.floor(order_size/last_price)\n order = MarketOrder('BUY', quantity)\n else:\n quantity=kwargs.get(\"quantity\",0)\n order = MarketOrder('SELL', quantity)\n trade = self.ib.placeOrder(contract, order)\n \n self.ib.sleep(1.0)\n if trade.orderStatus.status == 'Filled':\n fill = trade.fills[-1]\n txt=f'{fill.time} - {fill.execution.side} {fill.contract.symbol} {fill.execution.shares} @ {fill.execution.avgPrice}'\n price=fill.execution.avgPrice \n return txt, price, quantity\n \n def exit_order(self,symbol,strategy, exchange,short,**kwargs): \n #type check necessary for indexes\n try:\n pf= get_pf(strategy, exchange,short)\n ocap=get_order_capital(strategy, exchange,short)\n \n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol) #actually should be more complex\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n else:\n action=Action.objects.get(symbol=symbol)\n \n if symbol in pf.retrieve():\n c1 = Q(action=action)\n c2 = Q(active=True)\n \n order=Order.objects.filter(c1 & c2)\n\n #profit\n if len(order)>0:\n txt, order[0].exiting_price, quantity= self.place(False,\n action.ib_ticker(),\n action.currency.symbol, \n 
action.stock_ex.ib_ticker,\n quantity=order[0].quantity)\n order[0].exiting_date=timezone.now()\n \n if order[0].entering_price is not None: \n order[0].profit=order[0].exiting_price-order[0].entering_price\n order[0].profit_percent=(order[0].exiting_price/order[0].entering_price-1)*100\n \n order[0].active=False\n order[0].save()\n \n ocap.capital+=1\n ocap.save()\n pf.remove(symbol)\n pf.save()\n \n return True\n else:\n print(\"order not found \" + symbol)\n return False\n return False\n \n except Exception as msg:\n print(\"exception in exit\")\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n\n def entry_order(self,symbol,strategy, exchange,short,**kwargs): \n try:\n #type check necessary for indexes\n pf= get_pf(strategy, exchange,short)\n order_size=5000\n ocap=get_order_capital(strategy, exchange,short)\n #accountSummary\n \n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol)\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n else:\n action=Action.objects.get(symbol=symbol)\n \n excluded=Excluded.objects.get(name=\"all\") #list of actions completely excluded from entries\n\n if (symbol not in pf.retrieve() and \n symbol not in excluded.retrieve() and \n ocap.capital>0 and\n order_size<=self.cash_balance()):\n \n order=Order(action=action, pf=pf)\n txt, order.entering_price, order.quantity= self.place(True,\n action.ib_ticker(),\n action.currency.symbol,\n action.stock_ex.ib_ticker,\n order_size=order_size)\n \n if kwargs.get(\"sl\",False):\n sl=kwargs.get(\"sl\")\n order.sl_threshold=order.entering_price*(1-sl)\n \n order.save()\n pf.append(symbol)\n pf.save()\n ocap.capital-=1\n ocap.save()\n \n return True\n return False\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n\n def disconnect(self):\n self.ib.disconnect()\n\ndef check_hold_duration(symbol,strategy, exchange,short,**kwargs): \n #type check necessary for indexes\n try:\n pf= get_pf(strategy, exchange,short)\n \n #accountSummary\n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol)\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n else:\n action=Action.objects.get(symbol=symbol)\n\n if symbol in pf.retrieve():\n c1 = Q(action=action)\n c2 = Q(active=True)\n order=Order.objects.filter(c1 & c2)\n if len(order)>0:\n delta=timezone.now()-order[0].entering_date\n return delta.days\n \n return 0\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(msg)\n return 0\n\ndef entry_order(symbol,strategy, exchange,short,**kwargs): \n if PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]:\n myIB=MyIB()\n return myIB.entry_order(symbol,strategy, exchange,short,**kwargs), True\n else: \n return entry_order_test(symbol,strategy, exchange,short,**kwargs), False\n \ndef exit_order(symbol,strategy, exchange,short,**kwargs): \n if PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]:\n myIB=MyIB()\n return myIB.exit_order(symbol,strategy, exchange,short,**kwargs), True\n else: \n return exit_order_test(symbol,strategy, exchange,short,**kwargs), False\n\ndef entry_order_test(symbol,strategy, exchange,short,**kwargs): \n try:\n #type check necessary for indexes\n pf= get_pf(strategy, exchange,short)\n ocap=get_order_capital(strategy, exchange,short)\n \n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol)\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n 
else:\n action=Action.objects.get(symbol=symbol)\n symbol2=action.symbol\n \n excluded=Excluded.objects.get(name=\"all\") #list of actions completely excluded from entries\n \n if (symbol2 not in pf.retrieve() and \n symbol2 not in excluded.retrieve() and\n ocap.capital>0):\n order=Order(action=action, pf=pf)\n order.entering_price=1.0\n \n order.save()\n #post telegram\n pf.append(symbol2)\n \n pf.save()\n ocap.capital-=1 #also for short\n ocap.save()\n \n return True\n return False\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n \ndef exit_order_test(symbol,strategy, exchange,short,**kwargs): \n try:\n pf= get_pf(strategy, exchange,short)\n ocap=get_order_capital(strategy, exchange,short)\n \n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol) #actually should be more complex\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n else:\n action=Action.objects.get(symbol=symbol)\n symbol2=action.symbol\n \n if symbol2 in pf.retrieve():\n c1 = Q(action=action)\n c2 = Q(active=True)\n \n order=Order.objects.filter(c1 & c2)\n #post telegram\n #price\n #profit\n if len(order)>0:\n order[0].exiting_date=timezone.now()\n order[0].active=False\n order[0].save()\n\n ocap.capital+=1 #also for short\n ocap.save()\n pf.remove(symbol2)\n pf.save()\n\n return True\n return False\n \n except Exception as msg:\n print(\"exception in \" + __name__)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n\nclass Currency(models.Model):\n name=models.CharField(max_length=100, blank=False)\n symbol=models.CharField(max_length=100, blank=False,default=\"A\")\n \n def __str__(self):\n return self.name\n \nclass Fees(models.Model):\n name=models.CharField(max_length=100, blank=False, default=\"fee\")\n fixed=models.DecimalField(max_digits=100, decimal_places=5)\n percent=models.DecimalField(max_digits=100, decimal_places=5)\n \n def __str__(self):\n return self.name \n \nclass StockEx(models.Model):\n name=models.CharField(max_length=100, blank=False)\n fees=models.ForeignKey('Fees',on_delete=models.CASCADE)\n ib_ticker=models.CharField(max_length=15, blank=True,default=\"AAA\")\n opening_time=models.TimeField(default=\"09:00:00\")\n closing_time=models.TimeField(default=\"17:00:00\")\n \n def __str__(self):\n return self.name \n\n\nclass Strategy(models.Model):\n name=models.CharField(max_length=100, blank=False)\n \n def __str__(self):\n return self.name\n\n### Index is like action, but it had to be separated, as an index cannot be bought directly\nclass Index(models.Model):\n symbol=models.CharField(max_length=15, blank=False, primary_key=True)\n ib_ticker=models.CharField(max_length=15, blank=True,default=\"AAA\")\n name=models.CharField(max_length=100, blank=False)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)\n currency=models.ForeignKey('Currency',on_delete=models.CASCADE)\n etf_long=models.ForeignKey('Action',on_delete=models.PROTECT,default=0,related_name='etf_long')\n etf_short=models.ForeignKey('Action',on_delete=models.PROTECT, default=0,related_name='etf_short')\n \n class Meta:\n ordering = [\"name\"]\n\n def ib_ticker(self):\n return self.ib_ticker\n \n def __str__(self):\n return self.name \n\nclass Action(models.Model):\n symbol=models.CharField(max_length=15, blank=False, primary_key=True)\n ib_ticker=models.CharField(max_length=15, blank=True,default=\"AAA\")\n 
name=models.CharField(max_length=100, blank=False)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)\n currency=models.ForeignKey('Currency',on_delete=models.CASCADE)\n category=models.ForeignKey('ActionCategory',on_delete=models.CASCADE,blank=True)\n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)\n \n class Meta:\n ordering = [\"name\"]\n \n def ib_ticker(self):\n t=self.symbol.split(\".\")\n return t[0] \n \n def __str__(self):\n return self.name\n\nclass Order(models.Model):\n action=models.ForeignKey('Action',on_delete=models.CASCADE)\n pf=models.ForeignKey('PF',on_delete=models.SET_NULL,blank=True,null=True)\n active=models.BooleanField(blank=False,default=True)\n entering_date=models.DateTimeField(null=False, blank=False, auto_now_add=True)#default=timezone.now())\n exiting_date=models.DateTimeField(null=True, blank=True)\n entering_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n exiting_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n sl_threshold=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n profit=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n profit_percent=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n quantity=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n\n def __str__(self):\n return self.action.name + \" \"+ str(self.entering_date)\n\ndef pf_retrieve_all(**kwargs):\n arr=[]\n \n for pf in PF.objects.filter(short=kwargs.get(\"short\",False)):\n cat=ActionCategory.objects.get(short=\"ACT\")\n c1 = Q(category=cat)\n if kwargs.get(\"opening\")==\"9h\":\n stockEx1=StockEx.objects.filter(name=\"Paris\")\n stockEx2=StockEx.objects.filter(name=\"XETRA\")\n c2 = Q(stock_ex=stockEx1[0])\n c3 = Q(stock_ex=stockEx2[0])\n actions=pf.actions.filter(c1 & (c2|c3))\n elif kwargs.get(\"opening\")==\"15h\":\n stockEx1=StockEx.objects.filter(name=\"Nasdaq\")\n c2 = Q(stock_ex=stockEx1[0])\n actions=pf.actions.filter(c1 & c2)\n else:\n actions=pf.actions.filter(c1)\n \n for action in actions:\n if not action.symbol in arr:\n arr.append(action.symbol)\n return arr\n\n### Portfolio for a given strategy (used as name presently)\nclass PF(models.Model):\n # can be replaced with ib.positions() or ib.portfolio()\n name=models.CharField(max_length=100, blank=False)\n actions=models.ManyToManyField(Action,blank=True)\n short=models.BooleanField(blank=False,default=False)\n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)\n \n def __str__(self):\n return self.name\n \n def retrieve(self):\n arr=[]\n for action in self.actions.all():\n arr.append(action.symbol)\n return arr\n\n def remove(self,symbol):\n a = Action.objects.get(symbol=symbol)\n \n try:\n self.actions.remove(a)\n self.save()\n except Exception as msg:\n print(\"exception in remove_symbol\")\n print(symbol)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n\n def append(self,symbol):\n try:\n a = Action.objects.get(symbol=symbol)\n self.actions.add(a)\n self.save()\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(symbol)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass \n\ndef get_pf(strategy, exchange,short):\n s=Strategy.objects.get(name=strategy)\n 
e=StockEx.objects.get(name=exchange)\n\n c1 = Q(stock_ex=e)\n c2 = Q(strategy=s) \n c3 = Q(short=short)\n\n return PF.objects.get(c1 & c2 & c3)\n\n\n### To distinguish between ETF, actions, indexes...\nclass ActionCategory(models.Model):\n short=models.CharField(max_length=15, blank=False, default=\"AAA\", primary_key=True)\n name=models.CharField(max_length=100, blank=False)\n\n def __str__(self):\n return self.name \n\n###To define the capital assigned to one strategy.\n###Not used presently \nclass Capital(models.Model):\n #self.ib.accountSummary()\n capital=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n name=models.CharField(max_length=100, blank=False,default=\"\")\n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)\n \n def __str__(self):\n return self.name \n\ndef get_capital(strategy, exchange,short):\n s=Strategy.objects.get(name=strategy)\n e=StockEx.objects.get(name=exchange)\n\n c1 = Q(stock_ex=e)\n c2 = Q(strategy=s) \n c3 = Q(short=short)\n\n return Capital.objects.get(c1 & c2 & c3)\n\n###To define the number of orders assigned to one strategy\n###1 means that only one action can be owned at a time using this strategy\n\nclass OrderCapital(models.Model):\n capital=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n name=models.CharField(max_length=100, blank=False,default=\"\")\n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)\n \n def __str__(self):\n return self.name \n\ndef get_order_capital(strategy, exchange,short):\n s=Strategy.objects.get(name=strategy)\n e=StockEx.objects.get(name=exchange)\n\n c1 = Q(stock_ex=e)\n c2 = Q(strategy=s) \n\n return OrderCapital.objects.get(c1 & c2)\n\n###For strategy using two time frame, in the slow one (10 days) candidates are defined\n###And on daily basis the other strategy decides which of the candidate is really bought or sold\n\nclass Candidates(models.Model):\n name=models.CharField(max_length=100, blank=False)\n actions=models.ManyToManyField(Action,blank=True) \n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=1)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)\n \n def reset(self):\n for a in self.actions.all():\n self.actions.remove(a)\n self.save()\n \n def append(self,symbol): #so we can name as for list\n a = Action.objects.get(symbol=symbol)\n self.actions.add(a)\n self.save()\n \n def retrieve(self):\n arr=[]\n for action in self.actions.all():\n arr.append(action.symbol)\n return arr\n \n def __str__(self):\n return self.name \n\ndef get_candidates(strategy, exchange):\n s=Strategy.objects.get(name=strategy)\n e=StockEx.objects.get(name=exchange)\n\n c1 = Q(stock_ex=e)\n c2 = Q(strategy=s) \n\n return Candidates.objects.get(c1 & c2)\n \n### List of actions provisory excluded for a strategy as it risks to perform bad\n \nclass Excluded(models.Model):\n name=models.CharField(max_length=100, blank=False)\n actions=models.ManyToManyField(Action,blank=True) \n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)\n \n def reset(self):\n for a in self.actions.all():\n self.actions.remove(a)\n self.save()\n \n def append(self,symbol):\n a = Action.objects.get(symbol=symbol)\n self.actions.add(a)\n self.save()\n \n def remove(self,symbol):\n a = 
Action.objects.get(symbol=symbol)\n \n try:\n self.actions.remove(a)\n self.save()\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(symbol)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n \n def retrieve(self):\n arr=[]\n for action in self.actions.all():\n arr.append(action.symbol)\n return arr\n \n def __str__(self):\n return self.name \n \n### Define a list of actions and indexes that can be traded using the defined strategy\nclass StratCandidates(models.Model):\n name=models.CharField(max_length=100, blank=False)\n actions=models.ManyToManyField(Action,blank=True) \n indexes=models.ManyToManyField(Index,blank=True) \n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)\n \n def retrieve(self):\n arr=[]\n for action in self.actions.all():\n arr.append(action.symbol)\n return arr \n \n def __str__(self):\n return self.name " ]
[ [ "numpy.shape", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
johnson7788/lit
[ "3eb824b01e0f72a5486124b16056bf912465debc" ]
[ "lit_nlp/examples/sst_pytorch_demo.py" ]
[ "# Lint as: python3\nr\"\"\"Code example for a custom model, using PyTorch.\n\nThis demo shows how to use a custom model with LIT, in just a few lines of code.\nWe'll use a transformers model, with a minimal amount of code to implement the\nLIT API. Compared to models/glue_models.py, this has fewer features, but the\ncode is more readable.\nThis demo is similar in functionality to simple_tf2_demo.py, but uses PyTorch\ninstead of TensorFlow 2.\nThe transformers library can load weights from either,\nso you can use any saved model compatible with the underlying model class\n(AutoModelForSequenceClassification). To train something for this demo, you can:\n- Use quickstart_sst_demo.py, and set --model_path to somewhere durable\n- Or: Use tools/glue_trainer.py\n- Or: Use any fine-tuning code that works with transformers, such as\nhttps://github.com/huggingface/transformers#quick-tour-of-the-fine-tuningusage-scripts\nTo run locally:\n python -m lit_nlp.examples.simple_pytorch_demo \\\n --port=5432 --model_path=/path/to/saved/model\nThen navigate to localhost:5432 to access the demo UI.\nNOTE: this demo still uses TensorFlow Datasets (which depends on TensorFlow) to\nload the data. However, the output of glue.SST2Data is just NumPy arrays and\nplain Python data, and you can easily replace this with a different library or\ndirectly loading from CSV.\n\"\"\"\nimport re\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom lit_nlp import dev_server\nfrom lit_nlp import server_flags\nfrom lit_nlp.api import model as lit_model\nfrom lit_nlp.api import types as lit_types\nfrom lit_nlp.examples.datasets import glue\nfrom lit_nlp.lib import utils\nimport torch\nimport transformers\n\n# NOTE: additional flags defined in server_flags.py\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n \"model_path\", None,\n \"Path to trained model, in standard transformers format, e.g. 
as \"\n \"saved by model.save_pretrained() and tokenizer.save_pretrained()\")\n\n\ndef _from_pretrained(cls, *args, **kw):\n \"\"\"Load a transformers model in PyTorch, with fallback to TF2/Keras weights.\"\"\"\n try:\n return cls.from_pretrained(*args, **kw)\n except OSError as e:\n logging.warning(\"Caught OSError loading model: %s\", e)\n logging.warning(\n \"Re-trying to convert from TensorFlow checkpoint (from_tf=True)\")\n return cls.from_pretrained(*args, from_tf=True, **kw)\n\n\nclass SimpleSentimentModel(lit_model.Model):\n \"\"\"Simple sentiment analysis model.\"\"\"\n\n LABELS = [\"0\", \"1\"] # negative, positive\n compute_grads: bool = True # if True, compute and return gradients.\n\n def __init__(self, model_name_or_path):\n self.tokenizer = transformers.AutoTokenizer.from_pretrained(\n model_name_or_path)\n model_config = transformers.AutoConfig.from_pretrained(\n model_name_or_path,\n num_labels=2,\n output_hidden_states=True,\n output_attentions=True,\n )\n # This is a just a regular PyTorch model.\n self.model = _from_pretrained(\n transformers.AutoModelForSequenceClassification,\n model_name_or_path,\n config=model_config)\n self.model.eval()\n\n ##\n # LIT API implementation\n def max_minibatch_size(self):\n # This tells lit_model.Model.predict() how to batch inputs to\n # predict_minibatch().\n # Alternately, you can just override predict() and handle batching yourself.\n return 32\n\n def predict_minibatch(self, inputs):\n\n # Preprocess to ids and masks, and make the input batch.\n encoded_input = self.tokenizer.batch_encode_plus(\n [ex[\"sentence\"] for ex in inputs],\n return_tensors=\"pt\",\n add_special_tokens=True,\n max_length=128,\n padding=\"longest\",\n truncation=\"longest_first\")\n\n # Check and send to cuda (GPU) if available\n if torch.cuda.is_available():\n self.model.cuda()\n for tensor in encoded_input:\n encoded_input[tensor] = encoded_input[tensor].cuda()\n\n # Run a forward pass.\n with torch.set_grad_enabled(self.compute_grads):\n out: transformers.modeling_outputs.SequenceClassifierOutput = \\\n self.model(**encoded_input)\n\n # Post-process outputs.\n batched_outputs = {\n \"probas\": torch.nn.functional.softmax(out.logits, dim=-1),\n \"input_ids\": encoded_input[\"input_ids\"],\n \"ntok\": torch.sum(encoded_input[\"attention_mask\"], dim=1),\n \"cls_emb\": out.hidden_states[-1][:, 0], # last layer, first token\n }\n\n # Add attention layers to batched_outputs\n assert len(out.attentions) == self.model.config.num_hidden_layers\n for i, layer_attention in enumerate(out.attentions):\n batched_outputs[f\"layer_{i}/attention\"] = layer_attention\n\n # Request gradients after the forward pass.\n # Note: hidden_states[0] includes position and segment encodings, as well as\n # subword embeddings.\n if self.compute_grads:\n # <torch.float32>[batch_size, num_tokens, emb_dim]\n scalar_pred_for_gradients = torch.max(\n batched_outputs[\"probas\"], dim=1, keepdim=False, out=None)[0]\n batched_outputs[\"input_emb_grad\"] = torch.autograd.grad(\n scalar_pred_for_gradients,\n out.hidden_states[0],\n grad_outputs=torch.ones_like(scalar_pred_for_gradients))[0]\n\n # Post-process outputs.\n # Return as NumPy for further processing.\n detached_outputs = {\n k: v.cpu().detach().numpy() for k, v in batched_outputs.items()}\n\n # Unbatch outputs so we get one record per input example.\n for output in utils.unbatch_preds(detached_outputs):\n ntok = output.pop(\"ntok\")\n output[\"tokens\"] = self.tokenizer.convert_ids_to_tokens(\n 
output.pop(\"input_ids\")[:ntok])\n\n # set token gradients\n if self.compute_grads:\n output[\"token_grad_sentence\"] = output[\"input_emb_grad\"][:ntok]\n\n # Process attention.\n for key in output:\n if not re.match(r\"layer_(\\d+)/attention\", key):\n continue\n # Select only real tokens, since most of this matrix is padding.\n # <float32>[num_heads, max_seq_length, max_seq_length]\n # -> <float32>[num_heads, num_tokens, num_tokens]\n output[key] = output[key][:, :ntok, :ntok].transpose((0, 2, 1))\n # Make a copy of this array to avoid memory leaks, since NumPy otherwise\n # keeps a pointer around that prevents the source array from being GCed.\n output[key] = output[key].copy()\n yield output\n\n def input_spec(self) -> lit_types.Spec:\n return {\n \"sentence\": lit_types.TextSegment(),\n \"label\": lit_types.CategoryLabel(vocab=self.LABELS, required=False)\n }\n\n def output_spec(self) -> lit_types.Spec:\n ret = {\n \"tokens\": lit_types.Tokens(),\n \"probas\": lit_types.MulticlassPreds(parent=\"label\", vocab=self.LABELS),\n \"cls_emb\": lit_types.Embeddings()\n }\n # Gradients, if requested.\n if self.compute_grads:\n ret[\"token_grad_sentence\"] = lit_types.TokenGradients(\n align=\"tokens\")\n\n # Attention heads, one field for each layer.\n for i in range(self.model.config.num_hidden_layers):\n ret[f\"layer_{i}/attention\"] = lit_types.AttentionHeads(\n align_in=\"tokens\", align_out=\"tokens\")\n return ret\n\n\ndef main(_):\n # Normally path is a directory; if it's an archive file, download and\n # extract to the transformers cache.\n model_path = FLAGS.model_path\n if model_path.endswith(\".tar.gz\"):\n model_path = transformers.file_utils.cached_path(\n model_path, extract_compressed_file=True)\n\n # Load the model we defined above.\n models = {\"sst\": SimpleSentimentModel(model_path)}\n # Load SST-2 validation set from TFDS.\n datasets = {\"sst_dev\": glue.SST2Data(\"validation\")}\n\n # Start the LIT server. See server_flags.py for server options.\n lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())\n lit_demo.serve()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "torch.nn.functional.softmax", "torch.max", "torch.sum", "torch.set_grad_enabled", "torch.cuda.is_available", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dataength/automating-your-data-pipeline-with-apache-airflow
[ "90a1351de6de78c0f0a6fb2e778e2ba3b7c78f5e" ]
[ "machine-learning-pipeline/airflow/dags/train_simple_model.py" ]
[ "import pickle\n\nfrom airflow import DAG\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils import timezone\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndefault_args = {\n 'owner': 'ODDS',\n}\ndag = DAG(\n 'train_simple_model',\n schedule_interval='*/15 * * * *',\n default_args=default_args,\n start_date=timezone.datetime(2020, 8, 1),\n catchup=False\n)\n\nstart = DummyOperator(task_id='start', dag=dag)\n\n\ndef train_func():\n clf = RandomForestClassifier(random_state=0)\n X = [[ 1, 2, 3],\n [11, 12, 13]]\n y = [0, 1]\n clf.fit(X, y)\n\n MODEL_PATH = '/Users/zkan/Projects/dataength/' \\\n 'automating-your-data-pipeline-with-apache-airflow/' \\\n 'machine-learning-pipeline/airflow/dags'\n\n with open(f'{MODEL_PATH}/models/clf.model', 'wb') as outfile:\n pickle.dump(clf, outfile)\n\n\ntrain = PythonOperator(\n task_id='train',\n python_callable=train_func,\n dag=dag,\n)\n\nend = DummyOperator(task_id='end', dag=dag)\n\nstart >> train >> end\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
toddlerya/AnalyzeNPC
[ "5d16f994ec34300a3050463aad08ad3a1ec1eaba" ]
[ "nighteen_cpc.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author: toddler\n\nimport jieba\nimport re\nimport os\nfrom collections import Counter\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\n\ndef cut_analyze(input_file):\n \"\"\"\n :param input_file: 输入带切词分析的文本路径\n :return: (list1, list2) list1切词处理后的列表结果, list2输出切词处理排序后的词频结果, 列表-元祖嵌套结果\n \"\"\"\n cpc_dict_path = u'user_dict/cpc_dictionary.txt'\n stop_words_path = u'user_dict/stopword.txt'\n\n with open(input_file) as f:\n content = f.read()\n\n with open(stop_words_path) as sf:\n st_content = sf.readlines()\n\n jieba.load_userdict(cpc_dict_path) # 加载针对全国人民代表大会的分词词典\n\n stop_words = [line.strip().decode('utf-8') for line in st_content] # 将读取的数据都转为unicode处理\n\n seg_list = jieba.cut(content, cut_all=False) # 精确模式\n\n filter_seg_list = list()\n\n for seg in seg_list:\n goal_word = ''.join(re.findall(u'[\\u4e00-\\u9fa5]+', seg)).strip() # 过滤所有非中文字符内容\n if len(goal_word) != 0 and not stop_words.__contains__(goal_word): # 过滤分词结果中的停词内容\n # filter_seg_list.append(goal_word.encode('utf-8')) # 将unicode的文本转为utf-8保存到列表以备后续处理\n filter_seg_list.append(goal_word)\n\n seg_counter_all = Counter(filter_seg_list).most_common() # 对切词结果按照词频排序\n\n # for item in seg_counter_all:\n # print \"词语: {0} - 频数: {1}\".format(item[0].encode('utf-8'), item[1])\n\n return filter_seg_list, seg_counter_all\n\n\ndef main():\n input_file_path = u'input_file/nighteen-cpc.txt'\n cut_data, sort_data = cut_analyze(input_file=input_file_path)\n font = os.path.abspath('assets/msyh.ttf')\n wc = WordCloud(collocations=False, font_path=font, width=3600, height=3600, margin=2)\n wc.generate_from_frequencies(dict(sort_data))\n plt.figure()\n plt.imshow(wc)\n plt.axis('off')\n plt.show()\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ThomasHoppe/concept_formation
[ "2468fea78ba46804bf44228519eb33ebc5780d31" ]
[ "concept_formation/tests/benchmark_cobweb.py" ]
[ "from random import randint\nfrom timeit import timeit\n\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as mpatches\n\n\ndef generate_dataset(n_inst, n_attr, n_val):\n instances = []\n for i in range(n_inst):\n i = {}\n for j in range(n_attr):\n i[str(j)] = randint(1, n_val)\n instances.append(i)\n return instances\n\n\ndef time(n_inst, n_attr, n_val):\n return timeit('tree.fit(x)',\n setup=('from __main__ import generate_dataset; '\n 'from concept_formation.cobweb import CobwebTree; '\n 'tree = CobwebTree(); '\n 'x = generate_dataset(%i, %i, %i)' % (n_inst, n_attr,\n n_val)),\n number=1)\n\n\nif __name__ == \"__main__\":\n # 5 attributes\n sizes = [10, 30, 60, 120, 180, 220]\n times = [time(i, 5, 5) for i in sizes]\n plt.plot(sizes, times, 'ro')\n plt.plot(sizes, times, 'r-')\n\n # 10 attributes\n times = [time(i, 10, 5) for i in sizes]\n plt.plot(sizes, times, 'bo')\n plt.plot(sizes, times, 'b-')\n\n # 20 attributes\n times = [time(i, 20, 5) for i in sizes]\n plt.plot(sizes, times, 'go')\n plt.plot(sizes, times, 'g-')\n\n red_patch = mpatches.Patch(color='red', label='# attr=5')\n blue_patch = mpatches.Patch(color='blue', label='# attr=10')\n green_patch = mpatches.Patch(color='green', label='# attr=20')\n plt.legend(handles=[red_patch, blue_patch, green_patch], loc=2)\n\n plt.xlabel('Number of training instances (5 possible values / attr)')\n plt.ylabel('Runtime in Seconds')\n plt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.patches.Patch", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ljw23/ConvLab-2
[ "13d48ea0e441701bd66100689b6c25b561f15525", "13d48ea0e441701bd66100689b6c25b561f15525", "13d48ea0e441701bd66100689b6c25b561f15525" ]
[ "convlab2/policy/larl/multiwoz/latent_dialog/enc2dec/decoders.py", "convlab2/e2e/rnn_rollout/engines/selection_engine.py", "convlab2/policy/larl/multiwoz/latent_dialog/models_task.py" ]
[ "import torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport numpy as np\nfrom convlab2.policy.larl.multiwoz.latent_dialog.enc2dec.base_modules import BaseRNN\nfrom convlab2.policy.larl.multiwoz.latent_dialog.utils import cast_type, LONG, FLOAT\nfrom convlab2.policy.larl.multiwoz.latent_dialog.corpora import DECODING_MASKED_TOKENS, EOS\n\n\nTEACH_FORCE = 'teacher_forcing'\nTEACH_GEN = 'teacher_gen'\nGEN = 'gen'\nGEN_VALID = 'gen_valid'\n\n\nclass Attention(nn.Module):\n def __init__(self, dec_cell_size, ctx_cell_size, attn_mode, project):\n super(Attention, self).__init__()\n self.dec_cell_size = dec_cell_size\n self.ctx_cell_size = ctx_cell_size\n self.attn_mode = attn_mode\n if project:\n self.linear_out = nn.Linear(\n dec_cell_size+ctx_cell_size, dec_cell_size)\n else:\n self.linear_out = None\n\n if attn_mode == 'general':\n self.dec_w = nn.Linear(dec_cell_size, ctx_cell_size)\n elif attn_mode == 'cat':\n self.dec_w = nn.Linear(dec_cell_size, dec_cell_size)\n self.attn_w = nn.Linear(ctx_cell_size, dec_cell_size)\n self.query_w = nn.Linear(dec_cell_size, 1)\n\n def forward(self, output, context):\n # output: (batch_size, output_seq_len, dec_cell_size)\n # context: (batch_size, max_ctx_len, ctx_cell_size)\n batch_size = output.size(0)\n max_ctx_len = context.size(1)\n\n if self.attn_mode == 'dot':\n # (batch_size, output_seq_len, max_ctx_len)\n attn = th.bmm(output, context.transpose(1, 2))\n elif self.attn_mode == 'general':\n # (batch_size, output_seq_len, ctx_cell_size)\n mapped_output = self.dec_w(output)\n # (batch_size, output_seq_len, max_ctx_len)\n attn = th.bmm(mapped_output, context.transpose(1, 2))\n elif self.attn_mode == 'cat':\n # (batch_size, output_seq_len, dec_cell_size)\n mapped_output = self.dec_w(output)\n # (batch_size, max_ctx_len, dec_cell_size)\n mapped_attn = self.attn_w(context)\n # (batch_size, output_seq_len, max_ctx_len, dec_cell_size)\n tiled_output = mapped_output.unsqueeze(\n 2).repeat(1, 1, max_ctx_len, 1)\n # (batch_size, 1, max_ctx_len, dec_cell_size)\n tiled_attn = mapped_attn.unsqueeze(1)\n # (batch_size, output_seq_len, max_ctx_len, dec_cell_size)\n fc1 = F.tanh(tiled_output+tiled_attn)\n # (batch_size, otuput_seq_len, max_ctx_len)\n attn = self.query_w(fc1).squeeze(-1)\n else:\n raise ValueError('Unknown attention mode')\n\n # TODO mask\n # if self.mask is not None:\n\n # (batch_size, output_seq_len, max_ctx_len)\n attn = F.softmax(attn.view(-1, max_ctx_len),\n dim=1).view(batch_size, -1, max_ctx_len)\n # (batch_size, output_seq_len, ctx_cell_size)\n mix = th.bmm(attn, context)\n # (batch_size, output_seq_len, dec_cell_size+ctx_cell_size)\n combined = th.cat((mix, output), dim=2)\n if self.linear_out is None:\n return combined, attn\n else:\n output = F.tanh(\n self.linear_out(combined.view(-1, self.dec_cell_size+self.ctx_cell_size))).view(\n batch_size, -1, self.dec_cell_size) # (batch_size, output_seq_len, dec_cell_size)\n return output, attn\n\n\nclass DecoderRNN(BaseRNN):\n def __init__(self, input_dropout_p, rnn_cell, input_size, hidden_size, num_layers, output_dropout_p,\n bidirectional, vocab_size, use_attn, ctx_cell_size, attn_mode, sys_id, eos_id, use_gpu,\n max_dec_len, embedding=None):\n\n super(DecoderRNN, self).__init__(input_dropout_p=input_dropout_p,\n rnn_cell=rnn_cell,\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n output_dropout_p=output_dropout_p,\n bidirectional=bidirectional)\n\n # TODO embedding is None or 
not\n if embedding is None:\n self.embedding = nn.Embedding(vocab_size, input_size)\n else:\n self.embedding = embedding\n\n # share parameters between encoder and decoder\n # self.rnn = ctx_encoder.rnn\n # self.FC = nn.Linear(input_size, utt_encoder.output_size)\n\n self.use_attn = use_attn\n if self.use_attn:\n self.attention = Attention(dec_cell_size=hidden_size,\n ctx_cell_size=ctx_cell_size,\n attn_mode=attn_mode,\n project=True)\n\n self.dec_cell_size = hidden_size\n self.output_size = vocab_size\n self.project = nn.Linear(self.dec_cell_size, self.output_size)\n self.log_softmax = F.log_softmax\n\n self.sys_id = sys_id\n self.eos_id = eos_id\n self.use_gpu = use_gpu\n self.max_dec_len = max_dec_len\n\n def forward(self, batch_size, dec_inputs, dec_init_state, attn_context, mode, gen_type, beam_size, goal_hid=None):\n # dec_inputs: (batch_size, response_size-1)\n # attn_context: (batch_size, max_ctx_len, ctx_cell_size)\n # goal_hid: (batch_size, goal_nhid)\n\n ret_dict = dict()\n\n if self.use_attn:\n ret_dict[DecoderRNN.KEY_ATTN_SCORE] = list()\n\n if mode == GEN:\n dec_inputs = None\n\n if gen_type != 'beam':\n beam_size = 1\n\n if dec_inputs is not None:\n decoder_input = dec_inputs\n else:\n # prepare the BOS inputs\n with th.no_grad():\n bos_var = Variable(th.LongTensor([self.sys_id]))\n bos_var = cast_type(bos_var, LONG, self.use_gpu)\n decoder_input = bos_var.expand(\n batch_size*beam_size, 1) # (batch_size, 1)\n\n if mode == GEN and gen_type == 'beam':\n # TODO if beam search, repeat the initial states of the RNN\n pass\n else:\n decoder_hidden_state = dec_init_state\n\n # list of logprob | max_dec_len*(batch_size, 1, vocab_size)\n prob_outputs = []\n symbol_outputs = [] # list of word ids | max_dec_len*(batch_size, 1)\n # back_pointers = []\n # lengths = blabla...\n\n def decode(step, cum_sum, step_output, step_attn):\n prob_outputs.append(step_output)\n step_output_slice = step_output.squeeze(\n 1) # (batch_size, vocab_size)\n if self.use_attn:\n ret_dict[DecoderRNN.KEY_ATTN_SCORE].append(step_attn)\n\n if gen_type == 'greedy':\n _, symbols = step_output_slice.topk(1) # (batch_size, 1)\n elif gen_type == 'sample':\n # TODO FIXME\n # symbols = self.gumbel_max(step_output_slice)\n pass\n elif gen_type == 'beam':\n # TODO\n pass\n else:\n raise ValueError('Unsupported decoding mode')\n\n symbol_outputs.append(symbols)\n\n return cum_sum, symbols\n\n if mode == TEACH_FORCE:\n prob_outputs, decoder_hidden_state, attn = self.forward_step(\n input_var=decoder_input, hidden_state=decoder_hidden_state, encoder_outputs=attn_context, goal_hid=goal_hid)\n else:\n # do free running here\n cum_sum = None\n for step in range(self.max_dec_len):\n # Input:\n # decoder_input: (batch_size, 1)\n # decoder_hidden_state: tuple: (h, c)\n # attn_context: (batch_size, max_ctx_len, ctx_cell_size)\n # goal_hid: (batch_size, goal_nhid)\n # Output:\n # decoder_output: (batch_size, 1, vocab_size)\n # decoder_hidden_state: tuple: (h, c)\n # step_attn: (batch_size, 1, max_ctx_len)\n decoder_output, decoder_hidden_state, step_attn = self.forward_step(\n decoder_input, decoder_hidden_state, attn_context, goal_hid=goal_hid)\n cum_sum, symbols = decode(\n step, cum_sum, decoder_output, step_attn)\n decoder_input = symbols\n\n # (batch_size, max_dec_len, vocab_size)\n prob_outputs = th.cat(prob_outputs, dim=1)\n\n # back tracking to recover the 1-best in beam search\n # if gen_type == 'beam':\n\n ret_dict[DecoderRNN.KEY_SEQUENCE] = symbol_outputs\n\n # prob_outputs: (batch_size, max_dec_len, vocab_size)\n # 
decoder_hidden_state: tuple: (h, c)\n # ret_dict[DecoderRNN.KEY_ATTN_SCORE]: max_dec_len*(batch_size, 1, max_ctx_len)\n # ret_dict[DecoderRNN.KEY_SEQUENCE]: max_dec_len*(batch_size, 1)\n return prob_outputs, decoder_hidden_state, ret_dict\n\n def forward_step(self, input_var, hidden_state, encoder_outputs, goal_hid):\n # input_var: (batch_size, response_size-1 i.e. output_seq_len)\n # hidden_state: tuple: (h, c)\n # encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)\n # goal_hid: (batch_size, goal_nhid)\n batch_size, output_seq_len = input_var.size()\n # (batch_size, output_seq_len, embedding_dim)\n embedded = self.embedding(input_var)\n\n # add goals\n if goal_hid is not None:\n # (batch_size, 1, goal_nhid)\n goal_hid = goal_hid.view(goal_hid.size(0), 1, goal_hid.size(1))\n # (batch_size, output_seq_len, goal_nhid)\n goal_rep = goal_hid.repeat(1, output_seq_len, 1)\n # (batch_size, output_seq_len, embedding_dim+goal_nhid)\n embedded = th.cat([embedded, goal_rep], dim=2)\n\n embedded = self.input_dropout(embedded)\n\n # ############\n # embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)\n\n # output: (batch_size, output_seq_len, dec_cell_size)\n # hidden: tuple: (h, c)\n output, hidden_s = self.rnn(embedded, hidden_state)\n\n attn = None\n if self.use_attn:\n # output: (batch_size, output_seq_len, dec_cell_size)\n # encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)\n # attn: (batch_size, output_seq_len, max_ctx_len)\n output, attn = self.attention(output, encoder_outputs)\n\n # (batch_size*output_seq_len, vocab_size)\n logits = self.project(output.contiguous().view(-1, self.dec_cell_size))\n prediction = self.log_softmax(logits, dim=logits.dim(\n )-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)\n return prediction, hidden_s, attn\n\n # special for rl\n def _step(self, input_var, hidden_state, encoder_outputs, goal_hid):\n # input_var: (1, 1)\n # hidden_state: tuple: (h, c)\n # encoder_outputs: (1, max_dlg_len, dlg_cell_size)\n # goal_hid: (1, goal_nhid)\n batch_size, output_seq_len = input_var.size()\n embedded = self.embedding(input_var) # (1, 1, embedding_dim)\n\n if goal_hid is not None:\n goal_hid = goal_hid.view(goal_hid.size(\n 0), 1, goal_hid.size(1)) # (1, 1, goal_nhid)\n goal_rep = goal_hid.repeat(\n 1, output_seq_len, 1) # (1, 1, goal_nhid)\n # (1, 1, embedding_dim+goal_nhid)\n embedded = th.cat([embedded, goal_rep], dim=2)\n\n embedded = self.input_dropout(embedded)\n\n # ############\n # embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)\n\n # output: (1, 1, dec_cell_size)\n # hidden: tuple: (h, c)\n output, hidden_s = self.rnn(embedded, hidden_state)\n\n attn = None\n if self.use_attn:\n # output: (1, 1, dec_cell_size)\n # encoder_outputs: (1, max_dlg_len, dlg_cell_size)\n # attn: (1, 1, max_dlg_len)\n output, attn = self.attention(output, encoder_outputs)\n\n # (1*1, vocab_size)\n logits = self.project(output.view(-1, self.dec_cell_size))\n prediction = logits.view(\n batch_size, output_seq_len, -1) # (1, 1, vocab_size)\n # prediction = self.log_softmax(logits, dim=logits.dim()-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)\n return prediction, hidden_s\n\n # special for rl\n def write(self, input_var, hidden_state, encoder_outputs, max_words, vocab, stop_tokens, goal_hid=None, mask=True,\n decoding_masked_tokens=DECODING_MASKED_TOKENS):\n # input_var: (1, 1)\n # hidden_state: tuple: (h, c)\n # 
encoder_outputs: max_dlg_len*(1, 1, dlg_cell_size)\n # goal_hid: (1, goal_nhid)\n logprob_outputs = [] # list of logprob | max_dec_len*(1, )\n symbol_outputs = [] # list of word ids | max_dec_len*(1, )\n decoder_input = input_var\n decoder_hidden_state = hidden_state\n if type(encoder_outputs) is list:\n # (1, max_dlg_len, dlg_cell_size)\n encoder_outputs = th.cat(encoder_outputs, 1)\n # print('encoder_outputs.size() = {}'.format(encoder_outputs.size()))\n\n if mask:\n special_token_mask = Variable(th.FloatTensor(\n [-999. if token in decoding_masked_tokens else 0. for token in vocab]))\n special_token_mask = cast_type(\n special_token_mask, FLOAT, self.use_gpu) # (vocab_size, )\n\n def _sample(dec_output, num_i):\n # dec_output: (1, 1, vocab_size), need to softmax and log_softmax\n dec_output = dec_output.view(-1) # (vocab_size, )\n # TODO temperature\n prob = F.softmax(dec_output/0.6, dim=0) # (vocab_size, )\n logprob = F.log_softmax(dec_output, dim=0) # (vocab_size, )\n symbol = prob.multinomial(num_samples=1).detach() # (1, )\n # _, symbol = prob.topk(1) # (1, )\n _, tmp_symbol = prob.topk(1) # (1, )\n # print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))\n # print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))\n logprob = logprob.gather(0, symbol) # (1, )\n return logprob, symbol\n\n for i in range(max_words):\n decoder_output, decoder_hidden_state = self._step(\n decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)\n # disable special tokens from being generated in a normal turn\n if mask:\n decoder_output += special_token_mask.expand(1, 1, -1)\n logprob, symbol = _sample(decoder_output, i)\n logprob_outputs.append(logprob)\n symbol_outputs.append(symbol)\n decoder_input = symbol.view(1, -1)\n\n if vocab[symbol.item()] in stop_tokens:\n break\n\n assert len(logprob_outputs) == len(symbol_outputs)\n # logprob_list = [t.item() for t in logprob_outputs]\n logprob_list = logprob_outputs\n symbol_list = [t.item() for t in symbol_outputs]\n return logprob_list, symbol_list\n\n # For MultiWoz RL\n def forward_rl(self, batch_size, dec_init_state, attn_context, vocab, max_words, goal_hid=None, mask=True, temp=0.1):\n # prepare the BOS inputs\n with th.no_grad():\n bos_var = Variable(th.LongTensor([self.sys_id]))\n bos_var = cast_type(bos_var, LONG, self.use_gpu)\n decoder_input = bos_var.expand(batch_size, 1) # (1, 1)\n decoder_hidden_state = dec_init_state # tuple: (h, c)\n encoder_outputs = attn_context # (1, ctx_len, ctx_cell_size)\n\n logprob_outputs = [] # list of logprob | max_dec_len*(1, )\n symbol_outputs = [] # list of word ids | max_dec_len*(1, )\n\n if mask:\n special_token_mask = Variable(th.FloatTensor(\n [-999. if token in DECODING_MASKED_TOKENS else 0. 
for token in vocab]))\n special_token_mask = cast_type(\n special_token_mask, FLOAT, self.use_gpu) # (vocab_size, )\n\n def _sample(dec_output, num_i):\n # dec_output: (1, 1, vocab_size), need to softmax and log_softmax\n # (batch_size, vocab_size, )\n dec_output = dec_output.view(batch_size, -1)\n # (batch_size, vocab_size, )\n prob = F.softmax(dec_output/temp, dim=1)\n # (batch_size, vocab_size, )\n logprob = F.log_softmax(dec_output, dim=1)\n symbol = prob.multinomial(\n num_samples=1).detach() # (batch_size, 1)\n # _, symbol = prob.topk(1) # (1, )\n _, tmp_symbol = prob.topk(1) # (1, )\n # print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))\n # print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))\n logprob = logprob.gather(1, symbol) # (1, )\n return logprob, symbol\n\n stopped_samples = set()\n for i in range(max_words):\n decoder_output, decoder_hidden_state = self._step(\n decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)\n # disable special tokens from being generated in a normal turn\n if mask:\n decoder_output += special_token_mask.expand(1, 1, -1)\n logprob, symbol = _sample(decoder_output, i)\n logprob_outputs.append(logprob)\n symbol_outputs.append(symbol)\n decoder_input = symbol.view(batch_size, -1)\n for b_id in range(batch_size):\n if vocab[symbol[b_id].item()] == EOS:\n stopped_samples.add(b_id)\n\n if len(stopped_samples) == batch_size:\n break\n\n assert len(logprob_outputs) == len(symbol_outputs)\n symbol_outputs = th.cat(\n symbol_outputs, dim=1).cpu().data.numpy().tolist()\n logprob_outputs = th.cat(logprob_outputs, dim=1)\n logprob_list = []\n symbol_list = []\n for b_id in range(batch_size):\n b_logprob = []\n b_symbol = []\n for t_id in range(logprob_outputs.shape[1]):\n symbol = symbol_outputs[b_id][t_id]\n if vocab[symbol] == EOS and t_id != 0:\n break\n\n b_symbol.append(symbol_outputs[b_id][t_id])\n b_logprob.append(logprob_outputs[b_id][t_id])\n\n logprob_list.append(b_logprob)\n symbol_list.append(b_symbol)\n\n # TODO backward compatible, if batch_size == 1, we remove the nested structure\n if batch_size == 1:\n logprob_list = logprob_list[0]\n symbol_list = symbol_list[0]\n\n return logprob_list, symbol_list\n", "# Copyright 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch.autograd import Variable\n\nfrom convlab2.e2e.rnn_rollout.engines import EngineBase, Criterion\n\n\nclass SelectionEngine(EngineBase):\n def __init__(self, model, args, verbose=False):\n super(SelectionEngine, self).__init__(model, args, verbose)\n self.sel_crit = Criterion(\n self.model.item_dict,\n bad_toks=['<disconnect>', '<disagree>'],\n reduction='mean' if args.sep_sel else 'none')\n\n def _forward(model, batch, sep_sel=False):\n ctx, _, inpts, lens, _, sel_tgt, rev_idxs, hid_idxs, _ = batch\n ctx = Variable(ctx)\n inpts = [Variable(inpt) for inpt in inpts]\n rev_idxs = [Variable(idx) for idx in rev_idxs]\n hid_idxs = [Variable(idx) for idx in hid_idxs]\n if sep_sel:\n sel_tgt = Variable(sel_tgt)\n else:\n sel_tgt = [Variable(t) for t in sel_tgt]\n\n # remove YOU:/THEM: from the end\n sel_out = model(inpts[:-1], lens[:-1], rev_idxs[:-1], hid_idxs[:-1], ctx)\n\n return sel_out, sel_tgt\n\n def train_batch(self, batch):\n sel_out, sel_tgt = SelectionEngine._forward(self.model, batch,\n sep_sel=self.args.sep_sel)\n loss = 0\n if self.args.sep_sel:\n loss = 
self.sel_crit(sel_out, sel_tgt)\n else:\n for out, tgt in zip(sel_out, sel_tgt):\n loss += self.sel_crit(out, tgt)\n loss /= sel_out[0].size(0)\n\n self.opt.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)\n self.opt.step()\n return loss.item()\n\n def valid_batch(self, batch):\n with torch.no_grad():\n sel_out, sel_tgt = SelectionEngine._forward(self.model, batch,\n sep_sel=self.args.sep_sel)\n loss = 0\n if self.args.sep_sel:\n loss = self.sel_crit(sel_out, sel_tgt)\n else:\n for out, tgt in zip(sel_out, sel_tgt):\n loss += self.sel_crit(out, tgt)\n loss /= sel_out[0].size(0)\n\n return 0, loss.item(), 0\n\n\n", "import torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom convlab2.policy.larl.multiwoz.latent_dialog.base_models import BaseModel\nfrom convlab2.policy.larl.multiwoz.latent_dialog.corpora import SYS, EOS, PAD, BOS\nfrom convlab2.policy.larl.multiwoz.latent_dialog.utils import INT, FLOAT, LONG, Pack, cast_type\nfrom convlab2.policy.larl.multiwoz.latent_dialog.enc2dec.encoders import RnnUttEncoder\nfrom convlab2.policy.larl.multiwoz.latent_dialog.enc2dec.decoders import DecoderRNN, GEN, TEACH_FORCE\nfrom convlab2.policy.larl.multiwoz.latent_dialog.criterions import NLLEntropy, CatKLLoss, Entropy, NormKLLoss\nfrom convlab2.policy.larl.multiwoz.latent_dialog import nn_lib\nimport numpy as np\n\n\nclass SysPerfectBD2Word(BaseModel):\n def __init__(self, corpus, config):\n super(SysPerfectBD2Word, self).__init__(config)\n self.vocab = corpus.vocab\n self.vocab_dict = corpus.vocab_dict\n self.vocab_size = len(self.vocab)\n self.bos_id = self.vocab_dict[BOS]\n self.eos_id = self.vocab_dict[EOS]\n self.pad_id = self.vocab_dict[PAD]\n self.bs_size = corpus.bs_size\n self.db_size = corpus.db_size\n\n self.embedding = None\n self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,\n embedding_dim=config.embed_size,\n feat_size=0,\n goal_nhid=0,\n rnn_cell=config.utt_rnn_cell,\n utt_cell_size=config.utt_cell_size,\n num_layers=config.num_layers,\n input_dropout_p=config.dropout,\n output_dropout_p=config.dropout,\n bidirectional=config.bi_utt_cell,\n variable_lengths=False,\n use_attn=config.enc_use_attn,\n embedding=self.embedding)\n\n self.policy = nn.Sequential(nn.Linear(self.utt_encoder.output_size + self.db_size + self.bs_size,\n config.dec_cell_size), nn.Tanh(), nn.Dropout(config.dropout))\n\n self.decoder = DecoderRNN(input_dropout_p=config.dropout,\n rnn_cell=config.dec_rnn_cell,\n input_size=config.embed_size,\n hidden_size=config.dec_cell_size,\n num_layers=config.num_layers,\n output_dropout_p=config.dropout,\n bidirectional=False,\n vocab_size=self.vocab_size,\n use_attn=config.dec_use_attn,\n ctx_cell_size=self.utt_encoder.output_size,\n attn_mode=config.dec_attn_mode,\n sys_id=self.bos_id,\n eos_id=self.eos_id,\n use_gpu=config.use_gpu,\n max_dec_len=config.max_dec_len,\n embedding=self.embedding)\n\n self.nll = NLLEntropy(self.pad_id, config.avg_type)\n\n def forward(self, data_feed, mode, clf=False, gen_type='greedy', return_latent=False):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = 
len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # get decoder inputs\n dec_inputs = out_utts[:, :-1]\n labels = out_utts[:, 1:].contiguous()\n\n # pack attention context\n if self.config.dec_use_attn:\n attn_context = enc_outs\n else:\n attn_context = None\n\n # create decoder initial states\n dec_init_state = self.policy(th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)).unsqueeze(0)\n\n # decode\n if self.config.dec_rnn_cell == 'lstm':\n # h_dec_init_state = utt_summary.squeeze(1).unsqueeze(0)\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,\n dec_inputs=dec_inputs,\n # (batch_size, response_size-1)\n dec_init_state=dec_init_state, # tuple: (h, c)\n attn_context=attn_context,\n # (batch_size, max_ctx_len, ctx_cell_size)\n mode=mode,\n gen_type=gen_type,\n beam_size=self.config.beam_size) # (batch_size, goal_nhid)\n if mode == GEN:\n return ret_dict, labels\n if return_latent:\n return Pack(nll=self.nll(dec_outputs, labels),\n latent_action=dec_init_state)\n else:\n return Pack(nll=self.nll(dec_outputs, labels))\n\n def forward_rl(self, data_feed, max_words, temp=0.1):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # pack attention context\n if self.config.dec_use_attn:\n attn_context = enc_outs\n else:\n attn_context = None\n\n # create decoder initial states\n dec_init_state = self.policy(th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)).unsqueeze(0)\n\n if self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n # decode\n logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,\n dec_init_state=dec_init_state,\n attn_context=attn_context,\n vocab=self.vocab,\n max_words=max_words,\n temp=temp)\n return logprobs, outs\n\n\nclass SysPerfectBD2Cat(BaseModel):\n def __init__(self, corpus, config):\n super(SysPerfectBD2Cat, self).__init__(config)\n self.vocab = corpus.vocab\n self.vocab_dict = corpus.vocab_dict\n self.vocab_size = len(self.vocab)\n self.bos_id = self.vocab_dict[BOS]\n self.eos_id = self.vocab_dict[EOS]\n self.pad_id = self.vocab_dict[PAD]\n self.bs_size = corpus.bs_size\n self.db_size = corpus.db_size\n self.k_size = config.k_size\n self.y_size = config.y_size\n self.simple_posterior = config.simple_posterior\n self.contextual_posterior = config.contextual_posterior\n\n self.embedding = None\n self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,\n embedding_dim=config.embed_size,\n feat_size=0,\n goal_nhid=0,\n rnn_cell=config.utt_rnn_cell,\n utt_cell_size=config.utt_cell_size,\n num_layers=config.num_layers,\n input_dropout_p=config.dropout,\n output_dropout_p=config.dropout,\n bidirectional=config.bi_utt_cell,\n variable_lengths=False,\n use_attn=config.enc_use_attn,\n embedding=self.embedding)\n\n self.c2z = nn_lib.Hidden2Discrete(self.utt_encoder.output_size + self.db_size + self.bs_size,\n config.y_size, config.k_size, is_lstm=False)\n self.z_embedding = nn.Linear(self.y_size * self.k_size, 
config.dec_cell_size, bias=False)\n self.gumbel_connector = nn_lib.GumbelConnector(config.use_gpu)\n if not self.simple_posterior:\n if self.contextual_posterior:\n self.xc2z = nn_lib.Hidden2Discrete(self.utt_encoder.output_size * 2 + self.db_size + self.bs_size,\n config.y_size, config.k_size, is_lstm=False)\n else:\n self.xc2z = nn_lib.Hidden2Discrete(self.utt_encoder.output_size, config.y_size, config.k_size, is_lstm=False)\n\n self.decoder = DecoderRNN(input_dropout_p=config.dropout,\n rnn_cell=config.dec_rnn_cell,\n input_size=config.embed_size,\n hidden_size=config.dec_cell_size,\n num_layers=config.num_layers,\n output_dropout_p=config.dropout,\n bidirectional=False,\n vocab_size=self.vocab_size,\n use_attn=config.dec_use_attn,\n ctx_cell_size=config.dec_cell_size,\n attn_mode=config.dec_attn_mode,\n sys_id=self.bos_id,\n eos_id=self.eos_id,\n use_gpu=config.use_gpu,\n max_dec_len=config.max_dec_len,\n embedding=self.embedding)\n\n self.nll = NLLEntropy(self.pad_id, config.avg_type)\n self.cat_kl_loss = CatKLLoss()\n self.entropy_loss = Entropy()\n self.log_uniform_y = Variable(th.log(th.ones(1) / config.k_size))\n self.eye = Variable(th.eye(self.config.y_size).unsqueeze(0))\n self.beta = self.config.beta if hasattr(self.config, 'beta') else 0.0\n if self.use_gpu:\n self.log_uniform_y = self.log_uniform_y.cuda()\n self.eye = self.eye.cuda()\n\n def valid_loss(self, loss, batch_cnt=None):\n if self.simple_posterior:\n total_loss = loss.nll\n if self.config.use_pr > 0.0:\n total_loss += self.beta * loss.pi_kl\n else:\n total_loss = loss.nll + loss.pi_kl\n\n if self.config.use_mi:\n total_loss += (loss.b_pr * self.beta)\n\n if self.config.use_diversity:\n total_loss += loss.diversity\n\n return total_loss\n\n def forward(self, data_feed, mode, clf=False, gen_type='greedy', use_py=None, return_latent=False):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # get decoder inputs\n dec_inputs = out_utts[:, :-1]\n labels = out_utts[:, 1:].contiguous()\n\n # create decoder initial states\n enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)\n # create decoder initial states\n if self.simple_posterior:\n logits_qy, log_qy = self.c2z(enc_last)\n sample_y = self.gumbel_connector(logits_qy, hard=mode==GEN)\n log_py = self.log_uniform_y\n else:\n logits_py, log_py = self.c2z(enc_last)\n # encode response and use posterior to find q(z|x, c)\n x_h, _, _ = self.utt_encoder(out_utts.unsqueeze(1))\n if self.contextual_posterior:\n logits_qy, log_qy = self.xc2z(th.cat([enc_last, x_h.squeeze(1)], dim=1))\n else:\n logits_qy, log_qy = self.xc2z(x_h.squeeze(1))\n\n # use prior at inference time, otherwise use posterior\n if mode == GEN or (use_py is not None and use_py is True):\n sample_y = self.gumbel_connector(logits_py, hard=False)\n else:\n sample_y = self.gumbel_connector(logits_qy, hard=True)\n\n # pack attention context\n if self.config.dec_use_attn:\n z_embeddings = th.t(self.z_embedding.weight).split(self.k_size, dim=0)\n attn_context = []\n temp_sample_y = sample_y.view(-1, self.config.y_size, 
self.config.k_size)\n for z_id in range(self.y_size):\n attn_context.append(th.mm(temp_sample_y[:, z_id], z_embeddings[z_id]).unsqueeze(1))\n attn_context = th.cat(attn_context, dim=1)\n dec_init_state = th.sum(attn_context, dim=1).unsqueeze(0)\n else:\n dec_init_state = self.z_embedding(sample_y.view(1, -1, self.config.y_size * self.config.k_size))\n attn_context = None\n\n # decode\n if self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,\n dec_inputs=dec_inputs,\n # (batch_size, response_size-1)\n dec_init_state=dec_init_state, # tuple: (h, c)\n attn_context=attn_context,\n # (batch_size, max_ctx_len, ctx_cell_size)\n mode=mode,\n gen_type=gen_type,\n beam_size=self.config.beam_size) # (batch_size, goal_nhid)\n if mode == GEN:\n ret_dict['sample_z'] = sample_y\n ret_dict['log_qy'] = log_qy\n return ret_dict, labels\n\n else:\n result = Pack(nll=self.nll(dec_outputs, labels))\n # regularization qy to be uniform\n avg_log_qy = th.exp(log_qy.view(-1, self.config.y_size, self.config.k_size))\n avg_log_qy = th.log(th.mean(avg_log_qy, dim=0) + 1e-15)\n b_pr = self.cat_kl_loss(avg_log_qy, self.log_uniform_y, batch_size, unit_average=True)\n mi = self.entropy_loss(avg_log_qy, unit_average=True) - self.entropy_loss(log_qy, unit_average=True)\n pi_kl = self.cat_kl_loss(log_qy, log_py, batch_size, unit_average=True)\n q_y = th.exp(log_qy).view(-1, self.config.y_size, self.config.k_size) # b\n p = th.pow(th.bmm(q_y, th.transpose(q_y, 1, 2)) - self.eye, 2)\n\n result['pi_kl'] = pi_kl\n\n result['diversity'] = th.mean(p)\n result['nll'] = self.nll(dec_outputs, labels)\n result['b_pr'] = b_pr\n result['mi'] = mi\n return result\n\n def forward_rl(self, data_feed, max_words, temp=0.1):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # create decoder initial states\n enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)\n # create decoder initial states\n if self.simple_posterior:\n logits_py, log_qy = self.c2z(enc_last)\n else:\n logits_py, log_qy = self.c2z(enc_last)\n\n qy = F.softmax(logits_py / temp, dim=1) # (batch_size, vocab_size, )\n log_qy = F.log_softmax(logits_py, dim=1) # (batch_size, vocab_size, )\n idx = th.multinomial(qy, 1).detach()\n logprob_sample_z = log_qy.gather(1, idx).view(-1, self.y_size)\n joint_logpz = th.sum(logprob_sample_z, dim=1)\n sample_y = cast_type(Variable(th.zeros(log_qy.size())), FLOAT, self.use_gpu)\n sample_y.scatter_(1, idx, 1.0)\n\n # pack attention context\n if self.config.dec_use_attn:\n z_embeddings = th.t(self.z_embedding.weight).split(self.k_size, dim=0)\n attn_context = []\n temp_sample_y = sample_y.view(-1, self.config.y_size, self.config.k_size)\n for z_id in range(self.y_size):\n attn_context.append(th.mm(temp_sample_y[:, z_id], z_embeddings[z_id]).unsqueeze(1))\n attn_context = th.cat(attn_context, dim=1)\n dec_init_state = th.sum(attn_context, dim=1).unsqueeze(0)\n else:\n dec_init_state = self.z_embedding(sample_y.view(1, -1, self.config.y_size * self.config.k_size))\n attn_context = None\n\n # decode\n if 
self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n # decode\n logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,\n dec_init_state=dec_init_state,\n attn_context=attn_context,\n vocab=self.vocab,\n max_words=max_words,\n temp=0.1)\n return logprobs, outs, joint_logpz, sample_y\n\n\nclass SysPerfectBD2Gauss(BaseModel):\n def __init__(self, corpus, config):\n super(SysPerfectBD2Gauss, self).__init__(config)\n self.vocab = corpus.vocab\n self.vocab_dict = corpus.vocab_dict\n self.vocab_size = len(self.vocab)\n self.bos_id = self.vocab_dict[BOS]\n self.eos_id = self.vocab_dict[EOS]\n self.pad_id = self.vocab_dict[PAD]\n self.bs_size = corpus.bs_size\n self.db_size = corpus.db_size\n self.y_size = config.y_size\n self.simple_posterior = config.simple_posterior\n\n self.embedding = None\n self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,\n embedding_dim=config.embed_size,\n feat_size=0,\n goal_nhid=0,\n rnn_cell=config.utt_rnn_cell,\n utt_cell_size=config.utt_cell_size,\n num_layers=config.num_layers,\n input_dropout_p=config.dropout,\n output_dropout_p=config.dropout,\n bidirectional=config.bi_utt_cell,\n variable_lengths=False,\n use_attn=config.enc_use_attn,\n embedding=self.embedding)\n\n self.c2z = nn_lib.Hidden2Gaussian(self.utt_encoder.output_size + self.db_size + self.bs_size,\n config.y_size, is_lstm=False)\n self.gauss_connector = nn_lib.GaussianConnector(self.use_gpu)\n self.z_embedding = nn.Linear(self.y_size, config.dec_cell_size)\n if not self.simple_posterior:\n self.xc2z = nn_lib.Hidden2Gaussian(self.utt_encoder.output_size * 2 + self.db_size + self.bs_size,\n config.y_size, is_lstm=False)\n\n self.decoder = DecoderRNN(input_dropout_p=config.dropout,\n rnn_cell=config.dec_rnn_cell,\n input_size=config.embed_size,\n hidden_size=config.dec_cell_size,\n num_layers=config.num_layers,\n output_dropout_p=config.dropout,\n bidirectional=False,\n vocab_size=self.vocab_size,\n use_attn=config.dec_use_attn,\n ctx_cell_size=config.dec_cell_size,\n attn_mode=config.dec_attn_mode,\n sys_id=self.bos_id,\n eos_id=self.eos_id,\n use_gpu=config.use_gpu,\n max_dec_len=config.max_dec_len,\n embedding=self.embedding)\n\n self.nll = NLLEntropy(self.pad_id, config.avg_type)\n self.gauss_kl = NormKLLoss(unit_average=True)\n self.zero = cast_type(th.zeros(1), FLOAT, self.use_gpu)\n\n def valid_loss(self, loss, batch_cnt=None):\n if self.simple_posterior:\n total_loss = loss.nll\n if self.config.use_pr > 0.0:\n total_loss += self.config.beta * loss.pi_kl\n else:\n total_loss = loss.nll + loss.pi_kl\n\n return total_loss\n\n def forward(self, data_feed, mode, clf=False, gen_type='greedy', use_py=None, return_latent=False):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # get decoder inputs\n dec_inputs = out_utts[:, :-1]\n labels = out_utts[:, 1:].contiguous()\n\n # create decoder initial states\n enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)\n\n # create decoder initial states\n if self.simple_posterior:\n q_mu, q_logvar = self.c2z(enc_last)\n 
sample_z = self.gauss_connector(q_mu, q_logvar)\n p_mu, p_logvar = self.zero, self.zero\n else:\n p_mu, p_logvar = self.c2z(enc_last)\n # encode response and use posterior to find q(z|x, c)\n x_h, _, _ = self.utt_encoder(out_utts.unsqueeze(1))\n q_mu, q_logvar = self.xc2z(th.cat([enc_last, x_h.squeeze(1)], dim=1))\n\n # use prior at inference time, otherwise use posterior\n if mode == GEN or use_py:\n sample_z = self.gauss_connector(p_mu, p_logvar)\n else:\n sample_z = self.gauss_connector(q_mu, q_logvar)\n\n # pack attention context\n dec_init_state = self.z_embedding(sample_z.unsqueeze(0))\n attn_context = None\n\n # decode\n if self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,\n dec_inputs=dec_inputs,\n dec_init_state=dec_init_state, # tuple: (h, c)\n attn_context=attn_context,\n mode=mode,\n gen_type=gen_type,\n beam_size=self.config.beam_size) # (batch_size, goal_nhid)\n if mode == GEN:\n ret_dict['sample_z'] = sample_z\n return ret_dict, labels\n\n else:\n result = Pack(nll=self.nll(dec_outputs, labels))\n pi_kl = self.gauss_kl(q_mu, q_logvar, p_mu, p_logvar)\n result['pi_kl'] = pi_kl\n result['nll'] = self.nll(dec_outputs, labels)\n return result\n\n def gaussian_logprob(self, mu, logvar, sample_z):\n var = th.exp(logvar)\n constant = float(-0.5 * np.log(2*np.pi))\n logprob = constant - 0.5 * logvar - th.pow((mu-sample_z), 2) / (2.0*var)\n return logprob\n\n def forward_rl(self, data_feed, max_words, temp=0.1):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # create decoder initial states\n enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)\n # create decoder initial states\n p_mu, p_logvar = self.c2z(enc_last)\n\n sample_z = th.normal(p_mu, th.sqrt(th.exp(p_logvar))).detach()\n logprob_sample_z = self.gaussian_logprob(p_mu, self.zero, sample_z)\n joint_logpz = th.sum(logprob_sample_z, dim=1)\n\n # pack attention context\n dec_init_state = self.z_embedding(sample_z.unsqueeze(0))\n attn_context = None\n\n # decode\n if self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n # decode\n logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,\n dec_init_state=dec_init_state,\n attn_context=attn_context,\n vocab=self.vocab,\n max_words=max_words,\n temp=0.1)\n return logprobs, outs, joint_logpz, sample_z\n\n" ]
[ [ "torch.nn.functional.softmax", "torch.LongTensor", "torch.nn.functional.log_softmax", "torch.cat", "torch.nn.Embedding", "torch.nn.Linear", "torch.no_grad", "torch.bmm", "torch.FloatTensor", "torch.nn.functional.tanh" ], [ "torch.no_grad", "torch.autograd.Variable" ], [ "torch.mean", "torch.nn.functional.softmax", "torch.nn.Dropout", "numpy.log", "torch.ones", "torch.nn.functional.log_softmax", "torch.cat", "torch.zeros", "torch.transpose", "torch.mm", "torch.sum", "torch.eye", "torch.multinomial", "torch.nn.Tanh", "torch.exp", "torch.nn.Linear", "torch.t", "torch.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jramirezg/ThreatMapper
[ "af5fda3ff585f8728a7a0b48ae6818ed189e4dbf" ]
[ "deepfence_backend/tasks/task_scheduler.py" ]
[ "import arrow\nfrom config.app import celery_app, app\nfrom models.container_image_registry import RegistryCredential\nfrom models.scheduler import Scheduler\nfrom models.setting import Setting\nfrom croniter import croniter\nfrom utils import constants\nimport time\nfrom datetime import datetime\nfrom utils.helper import websocketio_channel_name_format, get_image_cve_status\nfrom config.redisconfig import redis\nfrom utils.esconn import ESConn\nfrom resource_models.node import Node\nfrom utils.reports import prepare_report_download, prepare_report_email_body\nfrom utils.response import set_response\nfrom flask import make_response\nimport json\nimport uuid\nfrom copy import deepcopy\nfrom utils.helper import get_all_scanned_node, get_all_scanned_images\nimport pandas as pd\nimport re\n\n\n@celery_app.task\ndef task_scheduler():\n with app.app_context():\n curr_time = arrow.now(tz=\"+00:00\").datetime.replace(minute=0, second=0, microsecond=0)\n scheduled_tasks = Scheduler.query.filter_by(is_enabled=True).all()\n if not scheduled_tasks:\n return\n for scheduled_task in scheduled_tasks:\n if croniter.match(scheduled_task.cron_expr, curr_time):\n run_node_task(scheduled_task.action, scheduled_task.nodes, scheduled_task.id, scheduled_task.cron_expr)\n\n\ndef run_node_task(action, node_action_details, scheduler_id=None, cron_expr=None):\n with app.app_context():\n curr_time = arrow.now(tz=\"+00:00\").datetime\n if scheduler_id:\n try:\n scheduled_task = Scheduler.query.get(scheduler_id)\n scheduled_task.last_ran_at = curr_time\n scheduled_task.status = \"running\"\n scheduled_task.save()\n except Exception as ex:\n app.logger.error(ex)\n return\n\n def save_scheduled_task_status(status):\n if scheduler_id:\n try:\n scheduled_task = Scheduler.query.get(scheduler_id)\n scheduled_task.status = status\n scheduled_task.save()\n except Exception as ex:\n app.logger.error(ex)\n\n save_scheduled_task_status(\"In Progress\")\n node_type = node_action_details[\"node_type\"]\n df_id_to_scope_id_map = {}\n topology_data_df_format = {}\n registry_credential = None\n if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:\n try:\n registry_credential = RegistryCredential.query.get(\n node_action_details[\"registry_images\"][\"registry_id\"])\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n return\n else:\n if not node_action_details.get(\"node_id_list\"):\n node_action_details[\"node_id_list\"] = []\n for i in range(3):\n try:\n redis_pipe = redis.pipeline()\n redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node_type.upper())\n redis_pipe.get(websocketio_channel_name_format(node_type + \"?format=deepfence\")[1])\n redis_resp = redis_pipe.execute()\n df_id_to_scope_id_map = redis_resp[0]\n if redis_resp[1]:\n topology_data_df_format = json.loads(redis_resp[1])\n if topology_data_df_format and df_id_to_scope_id_map:\n break\n else:\n app.logger.error(\"topology data is empty, retrying\")\n time.sleep(10)\n except Exception as ex:\n app.logger.error(ex)\n time.sleep(10)\n if action in [constants.NODE_ACTION_CVE_SCAN_START, constants.NODE_ACTION_SCHEDULE_CVE_SCAN]:\n if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:\n from config.app import celery_app\n redis_lock_keys = []\n redis_pipe = redis.pipeline()\n image_list_details_str = redis.get(\"{0}:{1}\".format(constants.REGISTRY_IMAGES_CACHE_KEY_PREFIX,\n node_action_details[\"registry_images\"][\n \"registry_id\"]))\n if image_list_details_str:\n if 
node_action_details[\"registry_images\"].get(\"all_registry_images\", False):\n image_dict = json.loads(image_list_details_str)\n image_df = pd.DataFrame(image_dict['image_list'])\n image_df['timestamp'] = pd.to_datetime(image_df.pushed_at)\n sorted_df = image_df.sort_values(by=['timestamp'], ascending=False)\n df_unique_list = sorted_df[\"image_tag\"].unique()\n df_unique = pd.DataFrame(data=df_unique_list, columns=[\"image_tag\"])\n sorted_df_by_image_tag = image_df.sort_values(\"image_tag\")\n images_by_tags = df_unique.merge(sorted_df_by_image_tag, on=[\"image_tag\"], how=\"outer\")[\n \"image_name_with_tag\"]\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = images_by_tags\n elif node_action_details[\"registry_images\"].get(\"only_new_images\", False):\n image_dict = json.loads(image_list_details_str)\n all_registry_images = set([image[\"image_name_with_tag\"] for image in image_dict['image_list']])\n if cron_expr:\n pattern = '^0.*?\\*/(\\d).*?$'\n match = re.search(pattern, cron_expr)\n if match:\n days_interval = int(match.group(1))\n else:\n days_interval = 1\n images_need_to_be_scanned = all_registry_images - get_all_scanned_images(days_interval)\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = list(\n images_need_to_be_scanned)\n elif node_action_details[\"registry_images\"].get(\"registry_scan_type\", None) == \"latest_timestamp\":\n image_dict = json.loads(image_list_details_str)\n image_df = pd.DataFrame(image_dict['image_list'])\n image_df['timestamp'] = pd.to_datetime(image_df.pushed_at)\n grouped = image_df.groupby(['image_name']).agg({\"timestamp\": max}).reset_index()\n latest_images_by_tags = image_df.merge(grouped, on=[\"image_name\", \"timestamp\"], how=\"inner\")[\n 'image_name_with_tag']\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = latest_images_by_tags\n elif node_action_details[\"registry_images\"].get(\"registry_scan_type\", None) == \"image_tags\":\n if node_action_details[\"registry_images\"].get(\"image_tags\", []):\n image_tags = node_action_details[\"registry_images\"].get(\"image_tags\", [])\n image_dict = json.loads(image_list_details_str)\n image_df = pd.DataFrame(image_dict['image_list'])\n images_by_tags = image_df[image_df[\"image_tag\"].isin(image_tags)][\"image_name_with_tag\"]\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = images_by_tags\n else:\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = []\n for image_name_with_tag in node_action_details[\"registry_images\"][\"image_name_with_tag_list\"]:\n lock_key = \"{0}:{1}\".format(constants.NODE_ACTION_CVE_SCAN_START, image_name_with_tag)\n redis_pipe.incr(lock_key)\n redis_lock_keys.append(lock_key)\n redis_resp = redis_pipe.execute()\n time.sleep(1)\n image_cve_status = get_image_cve_status()\n for i, image_name_with_tag in enumerate(\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"]):\n try:\n if redis_resp[i] != 1:\n continue\n cve_status = image_cve_status.get(image_name_with_tag, {}).get(\"action\", \"\")\n if cve_status:\n if cve_status == constants.CVE_SCAN_STATUS_QUEUED or cve_status in constants.CVE_SCAN_RUNNING_STATUS:\n continue\n datetime_now = datetime.now()\n scan_id = image_name_with_tag + \"_\" + datetime_now.strftime(\"%Y-%m-%dT%H:%M:%S\") + \".000\"\n body = {\n \"masked\": \"false\", \"type\": constants.CVE_SCAN_LOGS_INDEX, \"scan_id\": scan_id, \"host\": \"\",\n \"@timestamp\": datetime_now.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"), 
\"cve_scan_message\": \"\",\n \"action\": constants.CVE_SCAN_STATUS_QUEUED, \"host_name\": \"\", \"node_id\": image_name_with_tag,\n \"time_stamp\": int(time.time() * 1000.0), \"node_type\": constants.NODE_TYPE_CONTAINER_IMAGE\n }\n ESConn.create_doc(constants.CVE_SCAN_LOGS_INDEX, body)\n scan_details = {\n \"cve_node_id\": image_name_with_tag, \"scan_types\": node_action_details[\"scan_type\"],\n \"registry_type\": registry_credential.registry_type, \"scan_id\": scan_id,\n \"credential_id\": registry_credential.id}\n celery_task_id = \"cve_scan:\" + scan_id\n if node_action_details[\"registry_images\"].get(\"priority\", False):\n celery_app.send_task('tasks.vulnerability_scan_worker.vulnerability_scan', args=(),\n task_id=celery_task_id, kwargs={\"scan_details\": scan_details},\n queue=constants.VULNERABILITY_SCAN_PRIORITY_QUEUE)\n else:\n celery_app.send_task('tasks.vulnerability_scan_worker.vulnerability_scan', args=(),\n task_id=celery_task_id, kwargs={\"scan_details\": scan_details},\n queue=constants.VULNERABILITY_SCAN_QUEUE)\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n time.sleep(2)\n redis_pipe = redis.pipeline()\n for lock_key in redis_lock_keys:\n redis.delete(lock_key)\n redis_pipe.execute()\n else:\n node_list = []\n redis_lock_keys = []\n redis_pipe = redis.pipeline()\n for node_id in node_action_details[\"node_id_list\"]:\n try:\n node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,\n topology_data_df_format=topology_data_df_format)\n if node.type == constants.NODE_TYPE_HOST:\n lock_key = \"{0}:{1}\".format(constants.NODE_ACTION_CVE_SCAN_START, node.host_name)\n else:\n if not node.image_name_tag:\n continue\n lock_key = \"{0}:{1}\".format(constants.NODE_ACTION_CVE_SCAN_START, node.image_name_tag)\n if lock_key in redis_lock_keys:\n # If same image, different container, already selected, don't scan again\n continue\n redis_lock_keys.append(lock_key)\n redis_pipe.incr(lock_key)\n node_list.append(node)\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n if not node_list:\n error_message = \"No node available for scan\"\n save_scheduled_task_status(\"Error: \" + error_message)\n app.logger.error(error_message)\n return\n redis_resp = redis_pipe.execute()\n for i, node in enumerate(node_list):\n if redis_resp[i] != 1:\n continue\n try:\n node.cve_scan_start(node_action_details[\"scan_type\"],\n priority=node_action_details.get(\"priority\", False))\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n time.sleep(1)\n redis_pipe = redis.pipeline()\n for lock_key in redis_lock_keys:\n redis.delete(lock_key)\n redis_pipe.execute()\n elif action == constants.NODE_ACTION_CVE_SCAN_STOP:\n if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:\n from config.app import celery_app\n if node_action_details[\"registry_images\"].get(\"all_registry_images\", False):\n image_list_details_str = redis.get(\"{0}:{1}\".format(constants.REGISTRY_IMAGES_CACHE_KEY_PREFIX,\n node_action_details[\"registry_images\"][\n \"registry_id\"]))\n image_dict = json.loads(image_list_details_str)\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = [image[\"image_name_with_tag\"]\n for image in\n image_dict['image_list']]\n for image_name_with_tag in node_action_details[\"registry_images\"][\"image_name_with_tag_list\"]:\n try:\n es_response = ESConn.search_by_and_clause(constants.CVE_SCAN_LOGS_INDEX,\n {\"node_id\": 
image_name_with_tag}, 0, size=1)\n latest_cve_scan_doc = {}\n cve_scan_list = es_response.get(\"hits\", [])\n if cve_scan_list:\n cve_scan_doc = cve_scan_list[0]\n latest_cve_scan_doc = cve_scan_doc.get('_source', {})\n latest_cve_scan_doc.update({'_id': cve_scan_doc.get('_id', \"\")})\n if latest_cve_scan_doc:\n status = latest_cve_scan_doc.get(\"action\", \"\")\n scan_id = latest_cve_scan_doc.get(\"scan_id\", \"\")\n if (status in constants.CVE_SCAN_NOT_RUNNING_STATUS) or (not scan_id):\n continue\n elif status != constants.CVE_SCAN_STATUS_QUEUED:\n continue\n celery_task_id = \"cve_scan:\" + scan_id\n celery_app.control.revoke(celery_task_id, terminate=False)\n body = {\n \"masked\": \"false\", \"type\": constants.CVE_SCAN_LOGS_INDEX, \"scan_id\": scan_id,\n \"cve_scan_message\": \"Scan stopped by user\", \"time_stamp\": int(time.time() * 1000.0),\n \"@timestamp\": datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"), \"host\": \"\",\n \"action\": constants.CVE_SCAN_STATUS_STOPPED, \"host_name\": \"\",\n \"node_id\": latest_cve_scan_doc.get(\"node_id\", \"\"),\n \"node_type\": constants.NODE_TYPE_CONTAINER_IMAGE\n }\n ESConn.create_doc(constants.CVE_SCAN_LOGS_INDEX, body)\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n else:\n for node_id in node_action_details[\"node_id_list\"]:\n try:\n node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,\n topology_data_df_format=topology_data_df_format)\n node.cve_scan_stop()\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n elif action == constants.NODE_ACTION_SCHEDULE_SEND_REPORT:\n domain_name = \"\"\n console_url_setting = Setting.query.filter_by(key=\"console_url\").one_or_none()\n if console_url_setting and console_url_setting.value:\n domain_name = console_url_setting.value.get(\"value\")\n report_id = uuid.uuid4()\n body = {\n \"type\": constants.REPORT_INDEX,\n \"report_id\": report_id,\n \"status\": \"started\",\n \"masked\": \"false\",\n \"@timestamp\": datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n }\n ESConn.create_doc(constants.REPORT_INDEX, body, refresh=\"wait_for\")\n if node_action_details.get('include_dead_nodes') is True:\n if node_type == 'host':\n if len(node_action_details['filters'].get('host_name', [])) == 0:\n node_action_details['filters']['host_name'] = get_all_scanned_node()\n from config.app import celery_app\n celery_app.send_task(\n 'tasks.common_worker.generate_report', args=(),\n kwargs={\"report_id\": report_id, \"filters\": node_action_details.get(\"filters\", {}),\n \"lucene_query_string\": \"\",\n \"number\": node_action_details.get(\"duration\", {}).get(\"number\", 0),\n \"time_unit\": node_action_details.get(\"duration\", {}).get(\"time_unit\", \"day\"),\n \"domain_name\": domain_name, \"resources\": node_action_details.get(\"resources\", {}),\n \"file_type\": node_action_details.get(\"file_type\", \"xlsx\"), \"node_type\": node_type,\n \"include_dead_nodes\": node_action_details.get(\"include_dead_nodes\", False),\n \"report_email\": node_action_details[\"report_email\"]})\n return set_response(data=\"Started\")\n elif action == constants.NODE_ACTION_DOWNLOAD_REPORT:\n domain_name = \"\"\n console_url_setting = Setting.query.filter_by(key=\"console_url\").one_or_none()\n if console_url_setting and console_url_setting.value:\n domain_name = console_url_setting.value.get(\"value\")\n report_id = uuid.uuid4()\n body = {\n \"type\": constants.REPORT_INDEX,\n \"report_id\": report_id,\n 
\"status\": \"started\",\n \"masked\": \"false\",\n \"duration\": \"\",\n \"@timestamp\": datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n }\n ESConn.create_doc(constants.REPORT_INDEX, body, refresh=\"wait_for\")\n if node_action_details.get('include_dead_nodes') is True:\n if node_type == 'host':\n if len(node_action_details['filters'].get('host_name', [])) == 0:\n node_action_details['filters']['host_name'] = get_all_scanned_node()\n from config.app import celery_app\n celery_app.send_task(\n 'tasks.common_worker.generate_report', args=(),\n kwargs={\"report_id\": report_id, \"filters\": node_action_details.get(\"filters\", {}),\n \"lucene_query_string\": \"\",\n \"number\": node_action_details.get(\"duration\", {}).get(\"number\", 0),\n \"time_unit\": node_action_details.get(\"duration\", {}).get(\"time_unit\", \"d\"),\n \"domain_name\": domain_name, \"resources\": node_action_details.get(\"resources\", {}),\n \"file_type\": node_action_details.get(\"file_type\", \"xlsx\"), \"node_type\": node_type,\n \"include_dead_nodes\": node_action_details.get(\"include_dead_nodes\", False),\n \"report_email\": \"\"})\n return set_response(data=\"Started\")\n save_scheduled_task_status(\"Success\")\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
anonymous-cv/cvpr-sub
[ "6307520c73716de73ef63f5239bdac8dda20da41" ]
[ "test_pretrain.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport time\nimport argparse\nimport sys\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom tqdm import tqdm\n\nfrom network.BEV_Unet import BEV_Unet\nfrom network.ptBEV import ptBEVnet\nfrom dataloader.dataset import collate_fn_BEV,collate_fn_BEV_test,SemKITTI,SemKITTI_label_name,spherical_dataset,voxel_dataset\n#ignore weird np warning\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef fast_hist(pred, label, n):\n k = (label >= 0) & (label < n)\n bin_count=np.bincount(\n n * label[k].astype(int) + pred[k], minlength=n ** 2)\n return bin_count[:n ** 2].reshape(n, n)\n\ndef per_class_iu(hist):\n return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n\ndef fast_hist_crop(output, target, unique_label):\n hist = fast_hist(output.flatten(), target.flatten(), np.max(unique_label)+1)\n hist=hist[unique_label,:]\n hist=hist[:,unique_label]\n return hist\n\ndef SemKITTI2train(label):\n if isinstance(label, list):\n return [SemKITTI2train_single(a) for a in label]\n else:\n return SemKITTI2train_single(label)\n\ndef SemKITTI2train_single(label):\n remove_ind = label == 0\n label -= 1\n label[remove_ind] = 255\n return label\n\ndef train2SemKITTI(input_label):\n # delete 0 label\n new_labels=np.copy(input_label)\n new_labels[input_label==255]=0\n for label_num in range(0,19):\n new_labels[input_label==label_num]=label_num+1\n return new_labels\n\ndef main(args):\n data_path = args.data_dir\n test_batch_size = args.test_batch_size\n model_save_path = args.model_save_path\n output_path = args.test_output_path\n compression_model = args.grid_size[2]\n grid_size = args.grid_size\n pytorch_device = torch.device('cuda:0')\n model = args.model\n if model == 'polar':\n fea_dim = 9\n circular_padding = True\n elif model == 'traditional':\n fea_dim = 7\n circular_padding = False\n\n # prepare miou fun\n unique_label=np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1\n unique_label_str=[SemKITTI_label_name[x] for x in unique_label+1]\n\n # prepare model\n my_BEV_model=BEV_Unet(n_class=len(unique_label), n_height = compression_model, input_batch_norm = True, dropout = 0.5, circular_padding = circular_padding)\n my_model = ptBEVnet(my_BEV_model, pt_model = 'pointnet', grid_size = grid_size, fea_dim = fea_dim, max_pt_per_encode = 256,\n out_pt_fea_dim = 512, kernal_size = 1, pt_selection = 'random', fea_compre = compression_model)\n if os.path.exists(model_save_path):\n my_model.load_state_dict(torch.load(model_save_path))\n my_model.to(pytorch_device)\n\n # prepare dataset\n test_pt_dataset = SemKITTI(data_path + '/sequences/', imageset = 'test', return_ref = True)\n val_pt_dataset = SemKITTI(data_path + '/sequences/', imageset = 'val', return_ref = True)\n if model == 'polar':\n test_dataset=spherical_dataset(test_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True, return_test= True)\n val_dataset=spherical_dataset(val_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True)\n elif model == 'traditional':\n test_dataset=voxel_dataset(test_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True, return_test= True)\n val_dataset=voxel_dataset(val_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True)\n test_dataset_loader = torch.utils.data.DataLoader(dataset = test_dataset,\n batch_size = test_batch_size,\n collate_fn = collate_fn_BEV_test,\n shuffle = False,\n num_workers = 4)\n val_dataset_loader = 
torch.utils.data.DataLoader(dataset = val_dataset,\n batch_size = test_batch_size,\n collate_fn = collate_fn_BEV,\n shuffle = False,\n num_workers = 4)\n\n # validation\n print('*'*80)\n print('Test network performance on validation split')\n print('*'*80)\n pbar = tqdm(total=len(val_dataset_loader))\n my_model.eval()\n hist_list = []\n time_list = []\n with torch.no_grad():\n for i_iter_val,(_,val_vox_label,val_grid,val_pt_labs,val_pt_fea) in enumerate(val_dataset_loader):\n val_vox_label = SemKITTI2train(val_vox_label)\n val_pt_labs = SemKITTI2train(val_pt_labs)\n val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in val_pt_fea]\n val_grid_ten = [torch.from_numpy(i[:,:2]).to(pytorch_device) for i in val_grid]\n val_label_tensor=val_vox_label.type(torch.LongTensor).to(pytorch_device)\n\n torch.cuda.synchronize()\n start_time = time.time()\n predict_labels = my_model(val_pt_fea_ten, val_grid_ten)\n torch.cuda.synchronize()\n time_list.append(time.time()-start_time)\n\n predict_labels = torch.argmax(predict_labels,dim=1)\n predict_labels = predict_labels.cpu().detach().numpy()\n for count,i_val_grid in enumerate(val_grid):\n hist_list.append(fast_hist_crop(predict_labels[count,val_grid[count][:,0],val_grid[count][:,1],val_grid[count][:,2]],val_pt_labs[count],unique_label))\n pbar.update(1)\n iou = per_class_iu(sum(hist_list))\n print('Validation per class iou: ')\n for class_name, class_iou in zip(unique_label_str,iou):\n print('%s : %.2f%%' % (class_name, class_iou*100))\n val_miou = np.nanmean(iou) * 100\n del val_vox_label,val_grid,val_pt_fea,val_grid_ten\n pbar.close()\n print('Current val miou is %.3f ' % val_miou)\n print('Inference time per %d is %.4f seconds\\n' %\n (test_batch_size,np.mean(time_list)))\n \n # test\n print('*'*80)\n print('Generate predictions for test split')\n print('*'*80)\n pbar = tqdm(total=len(test_dataset_loader))\n for i_iter_test,(_,_,test_grid,_,test_pt_fea,test_index) in enumerate(test_dataset_loader):\n # predict\n test_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in test_pt_fea]\n test_grid_ten = [torch.from_numpy(i[:,:2]).to(pytorch_device) for i in test_grid]\n\n predict_labels = my_model(test_pt_fea_ten,test_grid_ten)\n predict_labels = torch.argmax(predict_labels,1)\n predict_labels = predict_labels.cpu().detach().numpy()\n # write to label file\n for count,i_test_grid in enumerate(test_grid):\n test_pred_label = predict_labels[count,test_grid[count][:,0],test_grid[count][:,1],test_grid[count][:,2]]\n test_pred_label = train2SemKITTI(test_pred_label)\n test_pred_label = np.expand_dims(test_pred_label,axis=1)\n save_dir = test_pt_dataset.im_idx[test_index[count]]\n _,dir2 = save_dir.split('/sequences/',1)\n new_save_dir = output_path + '/sequences/' +dir2.replace('velodyne','predictions')[:-3]+'label'\n if not os.path.exists(os.path.dirname(new_save_dir)):\n try:\n os.makedirs(os.path.dirname(new_save_dir))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n test_pred_label = test_pred_label.astype(np.uint32)\n test_pred_label.tofile(new_save_dir)\n pbar.update(1)\n del test_grid,test_pt_fea,test_index\n pbar.close()\n print('Predicted test labels are saved in %s. Need to be shifted to original label format before submitting to the Competition website.' 
% output_path)\n    print('Remap script can be found in semantic-kitti-api.')\n\nif __name__ == '__main__':\n    # Testing settings\n    parser = argparse.ArgumentParser(description='')\n    parser.add_argument('-d', '--data_dir', default='data')\n    parser.add_argument('-p', '--model_save_path', default='pretained_weight/SemKITTI_PolarSeg.pt')\n    parser.add_argument('-o', '--test_output_path', default='out/SemKITTI_test')\n    parser.add_argument('-m', '--model', choices=['polar','traditional'], default='polar', help='training model: polar or traditional (default: polar)')\n    parser.add_argument('-s', '--grid_size', nargs='+', type=int, default = [480,360,32], help='grid size of BEV representation (default: [480,360,32])')\n    parser.add_argument('--test_batch_size', type=int, default=1, help='batch size for testing (default: 1)')\n    \n    args = parser.parse_args()\n    if not len(args.grid_size) == 3:\n        raise Exception('Invalid grid size! Grid size should have 3 dimensions.')\n\n    print(' '.join(sys.argv))\n    print(args)\n    main(args)" ]
[ [ "numpy.diag", "torch.cuda.synchronize", "numpy.expand_dims", "torch.load", "torch.utils.data.DataLoader", "torch.from_numpy", "numpy.max", "numpy.copy", "torch.no_grad", "numpy.nanmean", "numpy.mean", "torch.device", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zblumen/stellargraph
[ "10e62006907dd5968286f33648d1054e9c961c1b" ]
[ "stellargraph/mapper/mini_batch_node_generators.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMappers to provide input data for the graph models in layers.\n\n\"\"\"\n__all__ = [\"ClusterNodeGenerator\", \"ClusterNodeSequence\"]\n\nimport random\nimport copy\nimport numpy as np\nimport networkx as nx\nfrom tensorflow.keras.utils import Sequence\n\nfrom scipy import sparse\nfrom ..core.graph import StellarGraph\nfrom ..core.utils import is_real_iterable\n\n\nclass ClusterNodeGenerator:\n \"\"\"\n A data generator for use with ClusterGCN models on homogeneous graphs, [1].\n\n The supplied graph G should be a StellarGraph object that is ready for\n machine learning. Currently the model requires node features to be available for all\n nodes in the graph.\n Use the :meth:`flow` method supplying the nodes and (optionally) targets\n to get an object that can be used as a Keras data generator.\n\n This generator will supply the features array and the adjacency matrix to a\n mini-batch Keras graph ML model.\n\n [1] `W. Chiang, X. Liu, S. Si, Y. Li, S. Bengio, C. Hsieh, 2019 <https://arxiv.org/abs/1905.07953>`_.\n\n For more information, please see the ClusterGCN demo:\n `<https://github.com/stellargraph/stellargraph/blob/master/demos/>`_\n\n Args:\n G (StellarGraph): a machine-learning StellarGraph-type graph\n clusters (int or list): If int then it indicates the number of clusters (default is 1 that is the given graph).\n If clusters is greater than 1, then nodes are uniformly at random assigned to a cluster. If list,\n then it should be a list of lists of node IDs such that each list corresponds to a cluster of nodes\n in G. The clusters should be non-overlapping.\n q (float): The number of clusters to combine for each mini-batch. 
The default is 1.\n lam (float): The mixture coefficient for adjacency matrix normalisation.\n name (str): an optional name of the generator\n \"\"\"\n\n def __init__(self, G, clusters=1, q=1, lam=0.1, name=None):\n\n if not isinstance(G, StellarGraph):\n raise TypeError(\"Graph must be a StellarGraph or StellarDiGraph object.\")\n\n self.graph = G\n self.name = name\n self.q = q # The number of clusters to sample per mini-batch\n self.lam = lam\n self.clusters = clusters\n\n if isinstance(clusters, list):\n self.k = len(clusters)\n elif isinstance(clusters, int):\n if clusters <= 0:\n raise ValueError(\n \"{}: clusters must be greater than 0.\".format(type(self).__name__)\n )\n self.k = clusters\n else:\n raise TypeError(\n \"{}: clusters must be either int or list type.\".format(\n type(self).__name__\n )\n )\n\n # Some error checking on the given parameter values\n if not isinstance(lam, float):\n raise TypeError(\"{}: lam must be a float type.\".format(type(self).__name__))\n\n if lam < 0 or lam > 1:\n raise ValueError(\n \"{}: lam must be in the range [0, 1].\".format(type(self).__name__)\n )\n\n if not isinstance(q, int):\n raise TypeError(\"{}: q must be integer type.\".format(type(self).__name__))\n\n if q <= 0:\n raise ValueError(\n \"{}: q must be greater than 0.\".format(type(self).__name__)\n )\n\n if self.k % q != 0:\n raise ValueError(\n \"{}: the number of clusters must be exactly divisible by q.\".format(\n type(self).__name__\n )\n )\n\n # Check if the graph has features\n G.check_graph_for_ml()\n\n self.node_list = list(G.nodes())\n\n # Check that there is only a single node type\n if len(G.node_types) > 1:\n raise ValueError(\n \"{}: node generator requires graph with single node type; \"\n \"a graph with multiple node types is passed. 
Stopping.\".format(\n type(self).__name__\n )\n )\n\n if isinstance(clusters, int):\n # We are not given graph clusters.\n # We are going to split the graph into self.k random clusters\n all_nodes = list(G.nodes())\n random.shuffle(all_nodes)\n cluster_size = len(all_nodes) // self.k\n self.clusters = [\n all_nodes[i : i + cluster_size]\n for i in range(0, len(all_nodes), cluster_size)\n ]\n if len(self.clusters) > self.k:\n # for the case that the number of nodes is not exactly divisible by k, we combine\n # the last cluster with the second last one\n self.clusters[-2].extend(self.clusters[-1])\n del self.clusters[-1]\n\n print(f\"Number of clusters {self.k}\")\n for i, c in enumerate(self.clusters):\n print(f\"{i} cluster has size {len(c)}\")\n\n # Get the features for the nodes\n self.features = G.node_features(self.node_list)\n\n def flow(self, node_ids, targets=None, name=None):\n \"\"\"\n Creates a generator/sequence object for training, evaluation, or prediction\n with the supplied node ids and numeric targets.\n\n Args:\n node_ids (iterable): an iterable of node ids for the nodes of interest\n (e.g., training, validation, or test set nodes)\n targets (2d array, optional): a 2D array of numeric node targets with shape `(len(node_ids),\n target_size)`\n name (str, optional): An optional name for the returned generator object.\n\n Returns:\n A ClusterNodeSequence object to use with ClusterGCN in Keras\n methods :meth:`fit_generator`, :meth:`evaluate_generator`, and :meth:`predict_generator`\n\n \"\"\"\n if targets is not None:\n # Check targets is an iterable\n if not is_real_iterable(targets):\n raise TypeError(\n \"{}: Targets must be an iterable or None\".format(\n type(self).__name__\n )\n )\n\n # Check targets correct shape\n if len(targets) != len(node_ids):\n raise ValueError(\n \"{}: Targets must be the same length as node_ids\".format(\n type(self).__name__\n )\n )\n\n return ClusterNodeSequence(\n self.graph,\n self.clusters,\n targets=targets,\n node_ids=node_ids,\n q=self.q,\n lam=self.lam,\n name=name,\n )\n\n\nclass ClusterNodeSequence(Sequence):\n \"\"\"\n A Keras-compatible data generator for node inference using ClusterGCN model.\n Use this class with the Keras methods :meth:`keras.Model.fit_generator`,\n :meth:`keras.Model.evaluate_generator`, and\n :meth:`keras.Model.predict_generator`,\n\n This class should be created using the `.flow(...)` method of\n :class:`ClusterNodeGenerator`.\n\n Args:\n graph (StellarGraph): The graph\n clusters (list): A list of lists such that each sub-list indicates the nodes in a cluster.\n The length of this list, len(clusters) indicates the number of batches in one epoch.\n targets (np.ndarray, optional): An optional array of node targets of size (N x C),\n where C is the target size (e.g., number of classes for one-hot class targets)\n node_ids (iterable, optional): The node IDs for the target nodes. Required if targets is not None.\n normalize_adj (bool, optional): Specifies whether the adjacency matrix for each mini-batch should\n be normalized or not. The default is True.\n q (int, optional): The number of subgraphs to combine for each batch. The default value is\n 1 such that the generator treats each subgraph as a batch.\n lam (float, optional): The mixture coefficient for adjacency matrix normalisation (the\n 'diagonal enhancement' method). 
Valid values are in the interval [0, 1] and the default value is 0.1.\n name (str, optional): An optional name for this generator object.\n \"\"\"\n\n def __init__(\n self,\n graph,\n clusters,\n targets=None,\n node_ids=None,\n normalize_adj=True,\n q=1,\n lam=0.1,\n name=None,\n ):\n\n self.name = name\n self.clusters = list()\n self.clusters_original = copy.deepcopy(clusters)\n self.graph = graph\n self.node_list = list(graph.nodes())\n self.normalize_adj = normalize_adj\n self.q = q\n self.lam = lam\n self.node_order = list()\n self._node_order_in_progress = list()\n self.__node_buffer = dict()\n self.target_ids = list()\n\n if len(clusters) % self.q != 0:\n raise ValueError(\n \"The number of clusters should be exactly divisible by q. However, {} number of clusters is not exactly divisible by {}.\".format(\n len(clusters), q\n )\n )\n\n if node_ids is not None:\n self.target_ids = list(node_ids)\n\n if targets is not None:\n if node_ids is None:\n raise ValueError(\n \"Since targets is not None, node_ids must be given and cannot be None.\"\n )\n\n if len(node_ids) != len(targets):\n raise ValueError(\n \"When passed together targets and indices should be the same length.\"\n )\n\n self.targets = np.asanyarray(targets)\n self.target_node_lookup = dict(\n zip(self.target_ids, range(len(self.target_ids)))\n )\n else:\n self.targets = None\n\n self.on_epoch_end()\n\n def __len__(self):\n num_batches = len(self.clusters_original) // self.q\n return num_batches\n\n def __getitem__(self, index):\n # The next batch should be the adjacency matrix for the cluster and the corresponding feature vectors\n # and targets if available.\n cluster = self.clusters[index]\n adj_cluster = self.graph.to_adjacency_matrix(cluster)\n\n # The operations to normalize the adjacency matrix are too slow.\n # Either optimize this or implement as a layer(?)\n if self.normalize_adj:\n # add self loops\n adj_cluster.setdiag(1) # add self loops\n degree_matrix_diag = 1.0 / (adj_cluster.sum(axis=1) + 1)\n degree_matrix_diag = np.squeeze(np.asarray(degree_matrix_diag))\n degree_matrix = sparse.lil_matrix(adj_cluster.shape)\n degree_matrix.setdiag(degree_matrix_diag)\n adj_cluster = degree_matrix.tocsr() @ adj_cluster\n adj_cluster.setdiag((1.0 + self.lam) * adj_cluster.diagonal())\n\n adj_cluster = adj_cluster.toarray()\n\n g_node_list = list(cluster)\n\n # Determine the target nodes that exist in this cluster\n target_nodes_in_cluster = np.asanyarray(\n list(set(g_node_list).intersection(self.target_ids))\n )\n\n self.__node_buffer[index] = target_nodes_in_cluster\n\n # Dictionary to store node indices for quicker node index lookups\n node_lookup = dict(zip(g_node_list, range(len(g_node_list))))\n\n # The list of indices of the target nodes in self.node_list\n target_node_indices = np.array(\n [node_lookup[n] for n in target_nodes_in_cluster]\n )\n\n if index == (len(self.clusters_original) // self.q) - 1:\n # last batch\n self.__node_buffer_dict_to_list()\n\n cluster_targets = None\n #\n if self.targets is not None:\n # Dictionary to store node indices for quicker node index lookups\n # The list of indices of the target nodes in self.node_list\n cluster_target_indices = np.array(\n [self.target_node_lookup[n] for n in target_nodes_in_cluster]\n )\n cluster_targets = self.targets[cluster_target_indices]\n cluster_targets = cluster_targets.reshape((1,) + cluster_targets.shape)\n\n features = self.graph.node_features(g_node_list)\n\n features = np.reshape(features, (1,) + features.shape)\n adj_cluster = 
adj_cluster.reshape((1,) + adj_cluster.shape)\n target_node_indices = target_node_indices[np.newaxis, np.newaxis, :]\n\n return [features, target_node_indices, adj_cluster], cluster_targets\n\n def __node_buffer_dict_to_list(self):\n self.node_order = []\n for k, v in self.__node_buffer.items():\n self.node_order.extend(v)\n\n def on_epoch_end(self):\n \"\"\"\n Shuffle all nodes at the end of each epoch\n \"\"\"\n if self.q > 1:\n # combine clusters\n cluster_indices = list(range(len(self.clusters_original)))\n random.shuffle(cluster_indices)\n self.clusters = []\n\n for i in range(0, len(cluster_indices) - 1, self.q):\n cc = cluster_indices[i : i + self.q]\n tmp = []\n for l in cc:\n tmp.extend(list(self.clusters_original[l]))\n self.clusters.append(tmp)\n else:\n self.clusters = copy.deepcopy(self.clusters_original)\n\n self.__node_buffer = dict()\n\n random.shuffle(self.clusters)\n" ]
[ [ "numpy.reshape", "numpy.asarray", "numpy.asanyarray", "numpy.array", "scipy.sparse.lil_matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
robbjr/datasets
[ "fbb2af9d0e88f8e2ae884e9764fbeff2ee487813" ]
[ "tensorflow_datasets/testing/mocking.py" ]
[ "# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Mock util for tfds.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport random\n\nfrom absl.testing import absltest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_datasets.core import features as features_lib\n\n\[email protected]\ndef mock_data(num_examples=1, as_dataset_fn=None, data_dir=None):\n \"\"\"Mock tfds to generate random data.\n\n This function requires the true metadata files (dataset_info.json, label.txt,\n vocabulary files) to be stored in `data_dir/dataset_name/version`, as they\n would be for the true dataset.\n The actual examples will be randomly generated using\n `builder.info.features.get_tensor_info()`.\n Download and prepare step will be skipped.\n\n Warning: As the mocked builder will use the true metadata (label names,...),\n the `info.split['train'].num_examples` won't match `len(list(ds_train))`.\n\n Usage (automated):\n\n ```\n with mock_data(num_examples=5):\n ds = tfds.load('some_dataset', split='train')\n\n for ex in ds: # ds will yield randomly generated examples.\n ex\n ```\n\n If you want more fine grain control over the generated examples, you can\n manually overwrite the `DatasetBuilder._as_dataset` method.\n Usage (manual):\n\n ```\n def as_dataset(self, *args, **kwargs):\n return tf.data.Dataset.from_generator(\n lambda: ({\n 'image': np.ones(shape=(28, 28, 1), dtype=np.uint8),\n 'label': i % 10,\n } for i in range(num_examples)),\n output_types=self.info.features.dtype,\n output_shapes=self.info.features.shape,\n )\n\n with mock_data(as_dataset_fn=as_dataset):\n ds = tfds.load('some_dataset', split='train')\n\n for ex in ds: # ds will yield the fake data example of 'as_dataset'.\n ex\n ```\n\n Args:\n num_examples: `int`, the number of fake example to generate.\n as_dataset_fn: if provided, will replace the default random example\n generator. This function mock the `FileAdapterBuilder._as_dataset`\n data_dir: `str`, `data_dir` folder from where to load the metadata.\n Will overwrite `data_dir` kwargs from `tfds.load`.\n\n Yields:\n None\n \"\"\"\n\n def mock_download_and_prepare(self, *args, **kwargs):\n del args\n del kwargs\n if not tf.io.gfile.exists(self._data_dir): # pylint: disable=protected-access\n raise ValueError(\n 'TFDS has been mocked, but metadata files where not found in {}. 
'\n 'You should copy the real metadata files, so that the dataset '\n 'can be loaded properly, or set the data_dir kwarg of'\n 'tfds.testing.mock_tfds(data_dir=...).'\n ''.format(self._data_dir) # pylint: disable=protected-access\n )\n\n def mock_as_dataset(self, *args, **kwargs):\n del args\n del kwargs\n ds = tf.data.Dataset.from_generator(\n lambda: (_generate_random_example(self) for _ in range(num_examples)),\n output_types=self.info.features.dtype,\n output_shapes=self.info.features.shape,\n )\n return ds\n\n if not as_dataset_fn:\n as_dataset_fn = mock_as_dataset\n\n if not data_dir:\n data_dir = os.path.join(os.path.dirname(__file__), 'metadata')\n\n download_and_prepare_path = 'tensorflow_datasets.core.dataset_builder.DatasetBuilder.download_and_prepare'\n as_dataset_path = 'tensorflow_datasets.core.dataset_builder.FileAdapterBuilder._as_dataset'\n data_dir_path = 'tensorflow_datasets.core.constants.DATA_DIR'\n\n with absltest.mock.patch(as_dataset_path, as_dataset_fn), \\\n absltest.mock.patch(\n download_and_prepare_path, mock_download_and_prepare), \\\n absltest.mock.patch(data_dir_path, data_dir):\n yield\n\n\ndef _generate_random_array(feature, tensor_info):\n \"\"\"Generates a random tensor for a single feature.\"\"\"\n # TODO(tfds): Could improve the fake generatiion:\n # * Use the feature statistics (min, max)\n # * For Sequence features\n # * For Text\n shape = [ # Fill dynamic shape with random values\n np.random.randint(5, 50) if s is None else s\n for s in tensor_info.shape\n ]\n if isinstance(feature, features_lib.ClassLabel):\n max_value = feature.num_classes\n elif isinstance(feature, features_lib.Text) and feature.vocab_size:\n max_value = feature.vocab_size\n else:\n max_value = 255\n\n # Generate some random values, depending on the dtype\n if tensor_info.dtype.is_integer:\n return np.random.randint(0, max_value, shape)\n elif tensor_info.dtype.is_floating:\n return np.random.random_sample(shape)\n elif tensor_info.dtype == tf.string:\n return ''.join(\n random.choice(' abcdefghij') for _ in range(random.randint(10, 20)))\n else:\n raise ValueError('Fake generation not supported for {}'.format(\n tensor_info.dtype))\n\n\ndef _generate_random_example(builder):\n root_feature = builder.info.features\n flat_features = root_feature._flatten(root_feature) # pylint: disable=protected-access\n flat_tensor_info = root_feature._flatten(root_feature.get_tensor_info()) # pylint: disable=protected-access\n flat_np = [\n _generate_random_array(feature, tensor_info)\n for feature, tensor_info in zip(flat_features, flat_tensor_info)\n ]\n return root_feature._nest(flat_np) # pylint: disable=protected-access\n" ]
[ [ "tensorflow.io.gfile.exists", "numpy.random.random_sample", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]