Columns:

repo_name   string (length 8 to 130)
hexsha      sequence
file_path   sequence
code        sequence
apis        sequence
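Each row below pairs a repository name with parallel per-file lists: one hexsha, one file path, one source string, and one list of fully qualified API calls per file. As a hedged sketch (the row layout is inferred from the records shown here, and the loading mechanism is not part of this dump), a record could be traversed like this:

```python
# Sketch under the assumption that a record is a dict matching the columns above,
# with hexsha, file_path, code, and apis as parallel lists (one entry per file).
record = {
    "repo_name": "thewtex/scikit-image",
    "hexsha": ["22bb6b94698b8889cbdf26b25d9e4fdb8b968d97"],
    "file_path": ["skimage/feature/peak.py"],
    "code": ["import numpy as np\nimport scipy.ndimage as ndi\n# ... full file contents ...\n"],
    "apis": [["numpy.nonzero", "numpy.argsort", "scipy.ndimage.maximum_filter"]],
}

for sha, path, source, api_list in zip(
    record["hexsha"], record["file_path"], record["code"], record["apis"]
):
    print(f"{record['repo_name']} @ {sha[:8]} :: {path}")
    print(f"  {len(source.splitlines())} source lines, {len(api_list)} APIs recorded")
```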
thewtex/scikit-image
[ "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97", "22bb6b94698b8889cbdf26b25d9e4fdb8b968d97" ]
[ "skimage/segmentation/tests/test_random_walker.py", "skimage/restoration/deconvolution.py", "skimage/feature/peak.py" ]
[ "import numpy as np\nfrom skimage.segmentation import random_walker\nfrom skimage.transform import resize\nfrom skimage._shared._warnings import expected_warnings\nfrom skimage._shared import testing\n\n\n# older versions of scipy raise a warning with new NumPy because they use\n# numpy.rank() instead of arr.ndim or numpy.linalg.matrix_rank.\nSCIPY_EXPECTED = 'numpy.linalg.matrix_rank|\\A\\Z'\nPYAMG_EXPECTED_WARNING = 'pyamg|\\A\\Z'\nPYAMG_SCIPY_EXPECTED = SCIPY_EXPECTED + '|' + PYAMG_EXPECTED_WARNING\n\n\ndef make_2d_syntheticdata(lx, ly=None):\n if ly is None:\n ly = lx\n np.random.seed(1234)\n data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)\n small_l = int(lx // 5)\n data[lx // 2 - small_l:lx // 2 + small_l,\n ly // 2 - small_l:ly // 2 + small_l] = 1\n data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,\n ly // 2 - small_l + 1:ly // 2 + small_l - 1] = (\n 0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2))\n data[lx // 2 - small_l, ly // 2 - small_l // 8:ly // 2 + small_l // 8] = 0\n seeds = np.zeros_like(data)\n seeds[lx // 5, ly // 5] = 1\n seeds[lx // 2 + small_l // 4, ly // 2 - small_l // 4] = 2\n return data, seeds\n\n\ndef make_3d_syntheticdata(lx, ly=None, lz=None):\n if ly is None:\n ly = lx\n if lz is None:\n lz = lx\n np.random.seed(1234)\n data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)\n small_l = int(lx // 5)\n data[lx // 2 - small_l:lx // 2 + small_l,\n ly // 2 - small_l:ly // 2 + small_l,\n lz // 2 - small_l:lz // 2 + small_l] = 1\n data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,\n ly // 2 - small_l + 1:ly // 2 + small_l - 1,\n lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0\n # make a hole\n hole_size = np.max([1, small_l // 8])\n data[lx // 2 - small_l,\n ly // 2 - hole_size:ly // 2 + hole_size,\n lz // 2 - hole_size:lz // 2 + hole_size] = 0\n seeds = np.zeros_like(data)\n seeds[lx // 5, ly // 5, lz // 5] = 1\n seeds[lx // 2 + small_l // 4,\n ly // 2 - small_l // 4,\n lz // 2 - small_l // 4] = 2\n return data, seeds\n\n\ndef test_2d_bf():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n labels_bf = random_walker(data, labels, beta=90, mode='bf')\n assert (labels_bf[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n full_prob_bf = random_walker(data, labels, beta=90, mode='bf',\n return_full_prob=True)\n assert (full_prob_bf[1, 25:45, 40:60] >=\n full_prob_bf[0, 25:45, 40:60]).all()\n assert data.shape == labels.shape\n # Now test with more than two labels\n labels[55, 80] = 3\n full_prob_bf = random_walker(data, labels, beta=90, mode='bf',\n return_full_prob=True)\n assert (full_prob_bf[1, 25:45, 40:60] >=\n full_prob_bf[0, 25:45, 40:60]).all()\n assert len(full_prob_bf) == 3\n assert data.shape == labels.shape\n\n\ndef test_2d_cg():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels_cg = random_walker(data, labels, beta=90, mode='cg')\n assert (labels_cg[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n full_prob = random_walker(data, labels, beta=90, mode='cg',\n return_full_prob=True)\n assert (full_prob[1, 25:45, 40:60] >=\n full_prob[0, 25:45, 40:60]).all()\n assert data.shape == labels.shape\n return data, labels_cg\n\n\ndef test_2d_cg_mg():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n expected = 'scipy.sparse.sparsetools|%s' % PYAMG_SCIPY_EXPECTED\n with expected_warnings([expected]):\n labels_cg_mg = 
random_walker(data, labels, beta=90, mode='cg_mg')\n assert (labels_cg_mg[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n with expected_warnings([expected]):\n full_prob = random_walker(data, labels, beta=90, mode='cg_mg',\n return_full_prob=True)\n assert (full_prob[1, 25:45, 40:60] >=\n full_prob[0, 25:45, 40:60]).all()\n assert data.shape == labels.shape\n return data, labels_cg_mg\n\n\ndef test_types():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n data = 255 * (data - data.min()) // (data.max() - data.min())\n data = data.astype(np.uint8)\n with expected_warnings([PYAMG_SCIPY_EXPECTED]):\n labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')\n assert (labels_cg_mg[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n return data, labels_cg_mg\n\n\ndef test_reorder_labels():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n labels[labels == 2] = 4\n labels_bf = random_walker(data, labels, beta=90, mode='bf')\n assert (labels_bf[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n return data, labels_bf\n\n\ndef test_2d_inactive():\n lx = 70\n ly = 100\n data, labels = make_2d_syntheticdata(lx, ly)\n labels[10:20, 10:20] = -1\n labels[46:50, 33:38] = -2\n labels = random_walker(data, labels, beta=90)\n assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()\n assert data.shape == labels.shape\n return data, labels\n\n\ndef test_3d():\n n = 30\n lx, ly, lz = n, n, n\n data, labels = make_3d_syntheticdata(lx, ly, lz)\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels = random_walker(data, labels, mode='cg')\n assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()\n assert data.shape == labels.shape\n return data, labels\n\n\ndef test_3d_inactive():\n n = 30\n lx, ly, lz = n, n, n\n data, labels = make_3d_syntheticdata(lx, ly, lz)\n old_labels = np.copy(labels)\n labels[5:25, 26:29, 26:29] = -1\n after_labels = np.copy(labels)\n with expected_warnings(['\"cg\" mode|CObject type' + '|' + SCIPY_EXPECTED]):\n labels = random_walker(data, labels, mode='cg')\n assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()\n assert data.shape == labels.shape\n return data, labels, old_labels, after_labels\n\n\ndef test_multispectral_2d():\n lx, ly = 70, 100\n data, labels = make_2d_syntheticdata(lx, ly)\n data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n multi_labels = random_walker(data, labels, mode='cg',\n multichannel=True)\n assert data[..., 0].shape == labels.shape\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n single_labels = random_walker(data[..., 0], labels, mode='cg')\n assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()\n assert data[..., 0].shape == labels.shape\n return data, multi_labels, single_labels, labels\n\n\ndef test_multispectral_3d():\n n = 30\n lx, ly, lz = n, n, n\n data, labels = make_3d_syntheticdata(lx, ly, lz)\n data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n multi_labels = random_walker(data, labels, mode='cg',\n multichannel=True)\n assert data[..., 0].shape == labels.shape\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n single_labels = random_walker(data[..., 0], labels, mode='cg')\n assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()\n assert 
(single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()\n assert data[..., 0].shape == labels.shape\n return data, multi_labels, single_labels, labels\n\n\ndef test_spacing_0():\n n = 30\n lx, ly, lz = n, n, n\n data, _ = make_3d_syntheticdata(lx, ly, lz)\n\n # Rescale `data` along Z axis\n data_aniso = np.zeros((n, n, n // 2))\n for i, yz in enumerate(data):\n data_aniso[i, :, :] = resize(yz, (n, n // 2),\n mode='constant',\n anti_aliasing=False)\n\n # Generate new labels\n small_l = int(lx // 5)\n labels_aniso = np.zeros_like(data_aniso)\n labels_aniso[lx // 5, ly // 5, lz // 5] = 1\n labels_aniso[lx // 2 + small_l // 4,\n ly // 2 - small_l // 4,\n lz // 4 - small_l // 8] = 2\n\n # Test with `spacing` kwarg\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',\n spacing=(1., 1., 0.5))\n\n assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()\n\n\ndef test_spacing_1():\n n = 30\n lx, ly, lz = n, n, n\n data, _ = make_3d_syntheticdata(lx, ly, lz)\n\n # Rescale `data` along Y axis\n # `resize` is not yet 3D capable, so this must be done by looping in 2D.\n data_aniso = np.zeros((n, n * 2, n))\n for i, yz in enumerate(data):\n data_aniso[i, :, :] = resize(yz, (n * 2, n),\n mode='constant',\n anti_aliasing=False)\n\n # Generate new labels\n small_l = int(lx // 5)\n labels_aniso = np.zeros_like(data_aniso)\n labels_aniso[lx // 5, ly // 5, lz // 5] = 1\n labels_aniso[lx // 2 + small_l // 4,\n ly - small_l // 2,\n lz // 2 - small_l // 4] = 2\n\n # Test with `spacing` kwarg\n # First, anisotropic along Y\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',\n spacing=(1., 2., 1.))\n assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()\n\n # Rescale `data` along X axis\n # `resize` is not yet 3D capable, so this must be done by looping in 2D.\n data_aniso = np.zeros((n, n * 2, n))\n for i in range(data.shape[1]):\n data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),\n mode='constant',\n anti_aliasing=False)\n\n # Generate new labels\n small_l = int(lx // 5)\n labels_aniso2 = np.zeros_like(data_aniso)\n labels_aniso2[lx // 5, ly // 5, lz // 5] = 1\n labels_aniso2[lx - small_l // 2,\n ly // 2 + small_l // 4,\n lz // 2 - small_l // 4] = 2\n\n # Anisotropic along X\n with expected_warnings(['\"cg\" mode' + '|' + SCIPY_EXPECTED]):\n labels_aniso2 = random_walker(data_aniso,\n labels_aniso2,\n mode='cg', spacing=(2., 1., 1.))\n assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()\n\n\ndef test_trivial_cases():\n # When all voxels are labeled\n img = np.ones((10, 10))\n labels = np.ones((10, 10))\n\n with expected_warnings([\"Returning provided labels\"]):\n pass_through = random_walker(img, labels)\n np.testing.assert_array_equal(pass_through, labels)\n\n # When all voxels are labeled AND return_full_prob is True\n labels[:, :5] = 3\n expected = np.concatenate(((labels == 1)[..., np.newaxis],\n (labels == 3)[..., np.newaxis]), axis=2)\n with expected_warnings([\"Returning provided labels\"]):\n test = random_walker(img, labels, return_full_prob=True)\n np.testing.assert_array_equal(test, expected)\n\n\ndef test_length2_spacing():\n # If this passes without raising an exception (warnings OK), the new\n # spacing code is working properly.\n np.random.seed(42)\n img = np.ones((10, 10)) + 0.2 * np.random.normal(size=(10, 10))\n labels = np.zeros((10, 10), dtype=np.uint8)\n labels[2, 4] = 1\n labels[6, 8] = 4\n random_walker(img, 
labels, spacing=(1., 2.))\n\n\ndef test_bad_inputs():\n # Too few dimensions\n img = np.ones(10)\n labels = np.arange(10)\n with testing.raises(ValueError):\n random_walker(img, labels)\n with testing.raises(ValueError):\n random_walker(img, labels, multichannel=True)\n\n # Too many dimensions\n np.random.seed(42)\n img = np.random.normal(size=(3, 3, 3, 3, 3))\n labels = np.arange(3 ** 5).reshape(img.shape)\n with testing.raises(ValueError):\n random_walker(img, labels)\n with testing.raises(ValueError):\n random_walker(img, labels, multichannel=True)\n\n # Spacing incorrect length\n img = np.random.normal(size=(10, 10))\n labels = np.zeros((10, 10))\n labels[2, 4] = 2\n labels[6, 8] = 5\n with testing.raises(ValueError):\n random_walker(img, labels, spacing=(1,))\n\n # Invalid mode\n img = np.random.normal(size=(10, 10))\n labels = np.zeros((10, 10))\n with testing.raises(ValueError):\n random_walker(img, labels, mode='bad')\n\n\ndef test_isolated_seeds():\n np.random.seed(0)\n a = np.random.random((7, 7))\n mask = - np.ones(a.shape)\n # This pixel is an isolated seed\n mask[1, 1] = 1\n # Unlabeled pixels\n mask[3:, 3:] = 0\n # Seeds connected to unlabeled pixels\n mask[4, 4] = 2\n mask[6, 6] = 1\n\n # Test that no error is raised, and that labels of isolated seeds are OK\n res = random_walker(a, mask)\n assert res[1, 1] == 1\n res = random_walker(a, mask, return_full_prob=True)\n assert res[0, 1, 1] == 1\n assert res[1, 1, 1] == 0\n", "\"\"\"Implementations restoration functions\"\"\"\n\n\nimport numpy as np\nimport numpy.random as npr\nfrom scipy.signal import fftconvolve, convolve\n\nfrom . import uft\n\n__keywords__ = \"restoration, image, deconvolution\"\n\n\ndef wiener(image, psf, balance, reg=None, is_real=True, clip=True):\n \"\"\"Wiener-Hunt deconvolution\n\n Return the deconvolution with a Wiener-Hunt approach (i.e. with\n Fourier diagonalisation).\n\n Parameters\n ----------\n image : (M, N) ndarray\n Input degraded image\n psf : ndarray\n Point Spread Function. This is assumed to be the impulse\n response (input image space) if the data-type is real, or the\n transfer function (Fourier space) if the data-type is\n complex. There is no constraints on the shape of the impulse\n response. The transfer function must be of shape `(M, N)` if\n `is_real is True`, `(M, N // 2 + 1)` otherwise (see\n `np.fft.rfftn`).\n balance : float\n The regularisation parameter value that tunes the balance\n between the data adequacy that improve frequency restoration\n and the prior adequacy that reduce frequency restoration (to\n avoid noise artifacts).\n reg : ndarray, optional\n The regularisation operator. The Laplacian by default. It can\n be an impulse response or a transfer function, as for the\n psf. Shape constraint is the same as for the `psf` parameter.\n is_real : boolean, optional\n True by default. Specify if ``psf`` and ``reg`` are provided\n with hermitian hypothesis, that is only half of the frequency\n plane is provided (due to the redundancy of Fourier transform\n of real signal). It's apply only if ``psf`` and/or ``reg`` are\n provided as transfer function. For the hermitian property see\n ``uft`` module or ``np.fft.rfftn``.\n clip : boolean, optional\n True by default. 
If True, pixel values of the result above 1 or\n under -1 are thresholded for skimage pipeline compatibility.\n\n Returns\n -------\n im_deconv : (M, N) ndarray\n The deconvolved image.\n\n Examples\n --------\n >>> from skimage import color, data, restoration\n >>> img = color.rgb2gray(data.astronaut())\n >>> from scipy.signal import convolve2d\n >>> psf = np.ones((5, 5)) / 25\n >>> img = convolve2d(img, psf, 'same')\n >>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)\n >>> deconvolved_img = restoration.wiener(img, psf, 1100)\n\n Notes\n -----\n This function applies the Wiener filter to a noisy and degraded\n image by an impulse response (or PSF). If the data model is\n\n .. math:: y = Hx + n\n\n where :math:`n` is noise, :math:`H` the PSF and :math:`x` the\n unknown original image, the Wiener filter is\n\n .. math::\n \\hat x = F^\\dagger (|\\Lambda_H|^2 + \\lambda |\\Lambda_D|^2)\n \\Lambda_H^\\dagger F y\n\n where :math:`F` and :math:`F^\\dagger` are the Fourier and inverse\n Fourier transfroms respectively, :math:`\\Lambda_H` the transfer\n function (or the Fourier transfrom of the PSF, see [Hunt] below)\n and :math:`\\Lambda_D` the filter to penalize the restored image\n frequencies (Laplacian by default, that is penalization of high\n frequency). The parameter :math:`\\lambda` tunes the balance\n between the data (that tends to increase high frequency, even\n those coming from noise), and the regularization.\n\n These methods are then specific to a prior model. Consequently,\n the application or the true image nature must corresponds to the\n prior model. By default, the prior model (Laplacian) introduce\n image smoothness or pixel correlation. It can also be interpreted\n as high-frequency penalization to compensate the instability of\n the solution with respect to the data (sometimes called noise\n amplification or \"explosive\" solution).\n\n Finally, the use of Fourier space implies a circulant property of\n :math:`H`, see [Hunt].\n\n References\n ----------\n .. [1] François Orieux, Jean-François Giovannelli, and Thomas\n Rodet, \"Bayesian estimation of regularization and point\n spread function parameters for Wiener-Hunt deconvolution\",\n J. Opt. Soc. Am. A 27, 1593-1607 (2010)\n\n http://www.opticsinfobase.org/josaa/abstract.cfm?URI=josaa-27-7-1593\n\n http://research.orieux.fr/files/papers/OGR-JOSA10.pdf\n\n .. [2] B. R. Hunt \"A matrix theory proof of the discrete\n convolution theorem\", IEEE Trans. on Audio and\n Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971\n \"\"\"\n if reg is None:\n reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)\n if not np.iscomplexobj(reg):\n reg = uft.ir2tf(reg, image.shape, is_real=is_real)\n\n if psf.shape != reg.shape:\n trans_func = uft.ir2tf(psf, image.shape, is_real=is_real)\n else:\n trans_func = psf\n\n wiener_filter = np.conj(trans_func) / (np.abs(trans_func) ** 2 +\n balance * np.abs(reg) ** 2)\n if is_real:\n deconv = uft.uirfft2(wiener_filter * uft.urfft2(image),\n shape=image.shape)\n else:\n deconv = uft.uifft2(wiener_filter * uft.ufft2(image))\n\n if clip:\n deconv[deconv > 1] = 1\n deconv[deconv < -1] = -1\n\n return deconv\n\n\ndef unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True,\n clip=True):\n \"\"\"Unsupervised Wiener-Hunt deconvolution.\n\n Return the deconvolution with a Wiener-Hunt approach, where the\n hyperparameters are automatically estimated. The algorithm is a\n stochastic iterative process (Gibbs sampler) described in the\n reference below. 
See also ``wiener`` function.\n\n Parameters\n ----------\n image : (M, N) ndarray\n The input degraded image.\n psf : ndarray\n The impulse response (input image's space) or the transfer\n function (Fourier space). Both are accepted. The transfer\n function is automatically recognized as being complex\n (``np.iscomplexobj(psf)``).\n reg : ndarray, optional\n The regularisation operator. The Laplacian by default. It can\n be an impulse response or a transfer function, as for the psf.\n user_params : dict\n Dictionary of parameters for the Gibbs sampler. See below.\n clip : boolean, optional\n True by default. If true, pixel values of the result above 1 or\n under -1 are thresholded for skimage pipeline compatibility.\n\n Returns\n -------\n x_postmean : (M, N) ndarray\n The deconvolved image (the posterior mean).\n chains : dict\n The keys ``noise`` and ``prior`` contain the chain list of\n noise and prior precision respectively.\n\n Other parameters\n ----------------\n The keys of ``user_params`` are:\n\n threshold : float\n The stopping criterion: the norm of the difference between to\n successive approximated solution (empirical mean of object\n samples, see Notes section). 1e-4 by default.\n burnin : int\n The number of sample to ignore to start computation of the\n mean. 15 by default.\n min_iter : int\n The minimum number of iterations. 30 by default.\n max_iter : int\n The maximum number of iterations if ``threshold`` is not\n satisfied. 200 by default.\n callback : callable (None by default)\n A user provided callable to which is passed, if the function\n exists, the current image sample for whatever purpose. The user\n can store the sample, or compute other moments than the\n mean. It has no influence on the algorithm execution and is\n only for inspection.\n\n Examples\n --------\n >>> from skimage import color, data, restoration\n >>> img = color.rgb2gray(data.astronaut())\n >>> from scipy.signal import convolve2d\n >>> psf = np.ones((5, 5)) / 25\n >>> img = convolve2d(img, psf, 'same')\n >>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)\n >>> deconvolved_img = restoration.unsupervised_wiener(img, psf)\n\n Notes\n -----\n The estimated image is design as the posterior mean of a\n probability law (from a Bayesian analysis). The mean is defined as\n a sum over all the possible images weighted by their respective\n probability. Given the size of the problem, the exact sum is not\n tractable. This algorithm use of MCMC to draw image under the\n posterior law. The practical idea is to only draw highly probable\n images since they have the biggest contribution to the mean. At the\n opposite, the less probable images are drawn less often since\n their contribution is low. Finally the empirical mean of these\n samples give us an estimation of the mean, and an exact\n computation with an infinite sample set.\n\n References\n ----------\n .. [1] François Orieux, Jean-François Giovannelli, and Thomas\n Rodet, \"Bayesian estimation of regularization and point\n spread function parameters for Wiener-Hunt deconvolution\",\n J. Opt. Soc. Am. 
A 27, 1593-1607 (2010)\n\n http://www.opticsinfobase.org/josaa/abstract.cfm?URI=josaa-27-7-1593\n\n http://research.orieux.fr/files/papers/OGR-JOSA10.pdf\n \"\"\"\n params = {'threshold': 1e-4, 'max_iter': 200,\n 'min_iter': 30, 'burnin': 15, 'callback': None}\n params.update(user_params or {})\n\n if reg is None:\n reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)\n if not np.iscomplexobj(reg):\n reg = uft.ir2tf(reg, image.shape, is_real=is_real)\n\n if psf.shape != reg.shape:\n trans_fct = uft.ir2tf(psf, image.shape, is_real=is_real)\n else:\n trans_fct = psf\n\n # The mean of the object\n x_postmean = np.zeros(trans_fct.shape)\n # The previous computed mean in the iterative loop\n prev_x_postmean = np.zeros(trans_fct.shape)\n\n # Difference between two successive mean\n delta = np.NAN\n\n # Initial state of the chain\n gn_chain, gx_chain = [1], [1]\n\n # The correlation of the object in Fourier space (if size is big,\n # this can reduce computation time in the loop)\n areg2 = np.abs(reg) ** 2\n atf2 = np.abs(trans_fct) ** 2\n\n # The Fourier transfrom may change the image.size attribut, so we\n # store it.\n if is_real:\n data_spectrum = uft.urfft2(image.astype(np.float))\n else:\n data_spectrum = uft.ufft2(image.astype(np.float))\n\n # Gibbs sampling\n for iteration in range(params['max_iter']):\n # Sample of Eq. 27 p(circX^k | gn^k-1, gx^k-1, y).\n\n # weighting (correlation in direct space)\n precision = gn_chain[-1] * atf2 + gx_chain[-1] * areg2 # Eq. 29\n excursion = np.sqrt(0.5) / np.sqrt(precision) * (\n np.random.standard_normal(data_spectrum.shape) +\n 1j * np.random.standard_normal(data_spectrum.shape))\n\n # mean Eq. 30 (RLS for fixed gn, gamma0 and gamma1 ...)\n wiener_filter = gn_chain[-1] * np.conj(trans_fct) / precision\n\n # sample of X in Fourier space\n x_sample = wiener_filter * data_spectrum + excursion\n if params['callback']:\n params['callback'](x_sample)\n\n # sample of Eq. 31 p(gn | x^k, gx^k, y)\n gn_chain.append(npr.gamma(image.size / 2,\n 2 / uft.image_quad_norm(data_spectrum -\n x_sample *\n trans_fct)))\n\n # sample of Eq. 31 p(gx | x^k, gn^k-1, y)\n gx_chain.append(npr.gamma((image.size - 1) / 2,\n 2 / uft.image_quad_norm(x_sample * reg)))\n\n # current empirical average\n if iteration > params['burnin']:\n x_postmean = prev_x_postmean + x_sample\n\n if iteration > (params['burnin'] + 1):\n current = x_postmean / (iteration - params['burnin'])\n previous = prev_x_postmean / (iteration - params['burnin'] - 1)\n\n delta = np.sum(np.abs(current - previous)) / \\\n np.sum(np.abs(x_postmean)) / (iteration - params['burnin'])\n\n prev_x_postmean = x_postmean\n\n # stop of the algorithm\n if (iteration > params['min_iter']) and (delta < params['threshold']):\n break\n\n # Empirical average \\approx POSTMEAN Eq. 44\n x_postmean = x_postmean / (iteration - params['burnin'])\n if is_real:\n x_postmean = uft.uirfft2(x_postmean, shape=image.shape)\n else:\n x_postmean = uft.uifft2(x_postmean)\n\n if clip:\n x_postmean[x_postmean > 1] = 1\n x_postmean[x_postmean < -1] = -1\n\n return (x_postmean, {'noise': gn_chain, 'prior': gx_chain})\n\n\ndef richardson_lucy(image, psf, iterations=50, clip=True):\n \"\"\"Richardson-Lucy deconvolution.\n\n Parameters\n ----------\n image : ndarray\n Input degraded image (can be N dimensional).\n psf : ndarray\n The point spread function.\n iterations : int\n Number of iterations. This parameter plays the role of\n regularisation.\n clip : boolean, optional\n True by default. 
If true, pixel value of the result above 1 or\n under -1 are thresholded for skimage pipeline compatibility.\n\n Returns\n -------\n im_deconv : ndarray\n The deconvolved image.\n\n Examples\n --------\n >>> from skimage import color, data, restoration\n >>> camera = color.rgb2gray(data.camera())\n >>> from scipy.signal import convolve2d\n >>> psf = np.ones((5, 5)) / 25\n >>> camera = convolve2d(camera, psf, 'same')\n >>> camera += 0.1 * camera.std() * np.random.standard_normal(camera.shape)\n >>> deconvolved = restoration.richardson_lucy(camera, psf, 5)\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution\n \"\"\"\n # compute the times for direct convolution and the fft method. The fft is of\n # complexity O(N log(N)) for each dimension and the direct method does\n # straight arithmetic (and is O(n*k) to add n elements k times)\n direct_time = np.prod(image.shape + psf.shape)\n fft_time = np.sum([n*np.log(n) for n in image.shape + psf.shape])\n\n # see whether the fourier transform convolution method or the direct\n # convolution method is faster (discussed in scikit-image PR #1792)\n time_ratio = 40.032 * fft_time / direct_time\n\n if time_ratio <= 1 or len(image.shape) > 2:\n convolve_method = fftconvolve\n else:\n convolve_method = convolve\n\n image = image.astype(np.float)\n psf = psf.astype(np.float)\n im_deconv = 0.5 * np.ones(image.shape)\n psf_mirror = psf[::-1, ::-1]\n\n for _ in range(iterations):\n relative_blur = image / convolve_method(im_deconv, psf, 'same')\n im_deconv *= convolve_method(relative_blur, psf_mirror, 'same')\n\n if clip:\n im_deconv[im_deconv > 1] = 1\n im_deconv[im_deconv < -1] = -1\n\n return im_deconv\n", "import numpy as np\nimport scipy.ndimage as ndi\nfrom ..segmentation import relabel_sequential\nfrom .. import measure\nfrom ..filters import rank_order\n\n\ndef _get_high_intensity_peaks(image, mask, num_peaks):\n \"\"\"\n Return the highest intensity peak coordinates.\n \"\"\"\n # get coordinates of peaks\n coord = np.nonzero(mask)\n # select num_peaks peaks\n if len(coord[0]) > num_peaks:\n intensities = image[coord]\n idx_maxsort = np.argsort(intensities)\n coord = np.transpose(coord)[idx_maxsort][-num_peaks:]\n else:\n coord = np.column_stack(coord)\n # Higest peak first\n return coord[::-1]\n\n\ndef peak_local_max(image, min_distance=1, threshold_abs=None,\n threshold_rel=None, exclude_border=True, indices=True,\n num_peaks=np.inf, footprint=None, labels=None,\n num_peaks_per_label=np.inf):\n \"\"\"Find peaks in an image as coordinate list or boolean mask.\n\n Peaks are the local maxima in a region of `2 * min_distance + 1`\n (i.e. peaks are separated by at least `min_distance`).\n\n If peaks are flat (i.e. multiple adjacent pixels have identical\n intensities), the coordinates of all such pixels are returned.\n\n If both `threshold_abs` and `threshold_rel` are provided, the maximum\n of the two is chosen as the minimum intensity threshold of peaks.\n\n Parameters\n ----------\n image : ndarray\n Input image.\n min_distance : int, optional\n Minimum number of pixels separating peaks in a region of `2 *\n min_distance + 1` (i.e. peaks are separated by at least\n `min_distance`).\n To find the maximum number of peaks, use `min_distance=1`.\n threshold_abs : float, optional\n Minimum intensity of peaks. 
By default, the absolute threshold is\n the minimum intensity of the image.\n threshold_rel : float, optional\n Minimum intensity of peaks, calculated as `max(image) * threshold_rel`.\n exclude_border : int, optional\n If nonzero, `exclude_border` excludes peaks from\n within `exclude_border`-pixels of the border of the image.\n indices : bool, optional\n If True, the output will be an array representing peak\n coordinates. If False, the output will be a boolean array shaped as\n `image.shape` with peaks present at True elements.\n num_peaks : int, optional\n Maximum number of peaks. When the number of peaks exceeds `num_peaks`,\n return `num_peaks` peaks based on highest peak intensity.\n footprint : ndarray of bools, optional\n If provided, `footprint == 1` represents the local region within which\n to search for peaks at every point in `image`. Overrides\n `min_distance` (also for `exclude_border`).\n labels : ndarray of ints, optional\n If provided, each unique region `labels == value` represents a unique\n region to search for peaks. Zero is reserved for background.\n num_peaks_per_label : int, optional\n Maximum number of peaks for each label.\n\n Returns\n -------\n output : ndarray or ndarray of bools\n\n * If `indices = True` : (row, column, ...) coordinates of peaks.\n * If `indices = False` : Boolean array shaped like `image`, with peaks\n represented by True values.\n\n Notes\n -----\n The peak local maximum function returns the coordinates of local peaks\n (maxima) in an image. A maximum filter is used for finding local maxima.\n This operation dilates the original image. After comparison of the dilated\n and original image, this function returns the coordinates or a mask of the\n peaks where the dilated image equals the original image.\n\n Examples\n --------\n >>> img1 = np.zeros((7, 7))\n >>> img1[3, 4] = 1\n >>> img1[3, 2] = 1.5\n >>> img1\n array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. 
]])\n\n >>> peak_local_max(img1, min_distance=1)\n array([[3, 4],\n [3, 2]])\n\n >>> peak_local_max(img1, min_distance=2)\n array([[3, 2]])\n\n >>> img2 = np.zeros((20, 20, 20))\n >>> img2[10, 10, 10] = 1\n >>> peak_local_max(img2, exclude_border=0)\n array([[10, 10, 10]])\n\n \"\"\"\n if type(exclude_border) == bool:\n exclude_border = min_distance if exclude_border else 0\n\n out = np.zeros_like(image, dtype=np.bool)\n\n # In the case of labels, recursively build and return an output\n # operating on each label separately\n if labels is not None:\n label_values = np.unique(labels)\n # Reorder label values to have consecutive integers (no gaps)\n if np.any(np.diff(label_values) != 1):\n mask = labels >= 1\n labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype)\n labels = labels.astype(np.int32)\n\n # New values for new ordering\n label_values = np.unique(labels)\n for label in label_values[label_values != 0]:\n maskim = (labels == label)\n out += peak_local_max(image * maskim, min_distance=min_distance,\n threshold_abs=threshold_abs,\n threshold_rel=threshold_rel,\n exclude_border=exclude_border,\n indices=False, num_peaks=num_peaks_per_label,\n footprint=footprint, labels=None)\n\n # Select highest intensities (num_peaks)\n coordinates = _get_high_intensity_peaks(image, out, num_peaks)\n\n if indices is True:\n return coordinates\n else:\n nd_indices = tuple(coordinates.T)\n out[nd_indices] = True\n return out\n\n if np.all(image == image.flat[0]):\n if indices is True:\n return np.empty((0, 2), np.int)\n else:\n return out\n\n # Non maximum filter\n if footprint is not None:\n image_max = ndi.maximum_filter(image, footprint=footprint,\n mode='constant')\n else:\n size = 2 * min_distance + 1\n image_max = ndi.maximum_filter(image, size=size, mode='constant')\n mask = image == image_max\n\n if exclude_border:\n # zero out the image borders\n for i in range(mask.ndim):\n mask = mask.swapaxes(0, i)\n remove = (footprint.shape[i] if footprint is not None\n else 2 * exclude_border)\n mask[:remove // 2] = mask[-remove // 2:] = False\n mask = mask.swapaxes(0, i)\n\n # find top peak candidates above a threshold\n thresholds = []\n if threshold_abs is None:\n threshold_abs = image.min()\n thresholds.append(threshold_abs)\n if threshold_rel is not None:\n thresholds.append(threshold_rel * image.max())\n if thresholds:\n mask &= image > max(thresholds)\n\n # Select highest intensities (num_peaks)\n coordinates = _get_high_intensity_peaks(image, mask, num_peaks)\n\n if indices is True:\n return coordinates\n else:\n nd_indices = tuple(coordinates.T)\n out[nd_indices] = True\n return out\n\n\ndef _prominent_peaks(image, min_xdistance=1, min_ydistance=1,\n threshold=None, num_peaks=np.inf):\n \"\"\"Return peaks with non-maximum suppression.\n\n Identifies most prominent features separated by certain distances.\n Non-maximum suppression with different sizes is applied separately\n in the first and second dimension of the image to identify peaks.\n\n Parameters\n ----------\n image : (M, N) ndarray\n Input image.\n min_xdistance : int\n Minimum distance separating features in the x dimension.\n min_ydistance : int\n Minimum distance separating features in the y dimension.\n threshold : float\n Minimum intensity of peaks. Default is `0.5 * max(image)`.\n num_peaks : int\n Maximum number of peaks. 
When the number of peaks exceeds `num_peaks`,\n return `num_peaks` coordinates based on peak intensity.\n\n Returns\n -------\n intensity, xcoords, ycoords : tuple of array\n Peak intensity values, x and y indices.\n \"\"\"\n\n img = image.copy()\n rows, cols = img.shape\n\n if threshold is None:\n threshold = 0.5 * np.max(img)\n\n ycoords_size = 2 * min_ydistance + 1\n xcoords_size = 2 * min_xdistance + 1\n img_max = ndi.maximum_filter1d(img, size=ycoords_size, axis=0,\n mode='constant', cval=0)\n img_max = ndi.maximum_filter1d(img_max, size=xcoords_size, axis=1,\n mode='constant', cval=0)\n mask = (img == img_max)\n img *= mask\n img_t = img > threshold\n\n label_img = measure.label(img_t)\n props = measure.regionprops(label_img, img_max)\n\n # Sort the list of peaks by intensity, not left-right, so larger peaks\n # in Hough space cannot be arbitrarily suppressed by smaller neighbors\n props = sorted(props, key=lambda x: x.max_intensity)[::-1]\n coords = np.array([np.round(p.centroid) for p in props], dtype=int)\n\n img_peaks = []\n ycoords_peaks = []\n xcoords_peaks = []\n\n # relative coordinate grid for local neighbourhood suppression\n ycoords_ext, xcoords_ext = np.mgrid[-min_ydistance:min_ydistance + 1,\n -min_xdistance:min_xdistance + 1]\n\n for ycoords_idx, xcoords_idx in coords:\n accum = img_max[ycoords_idx, xcoords_idx]\n if accum > threshold:\n # absolute coordinate grid for local neighbourhood suppression\n ycoords_nh = ycoords_idx + ycoords_ext\n xcoords_nh = xcoords_idx + xcoords_ext\n\n # no reflection for distance neighbourhood\n ycoords_in = np.logical_and(ycoords_nh > 0, ycoords_nh < rows)\n ycoords_nh = ycoords_nh[ycoords_in]\n xcoords_nh = xcoords_nh[ycoords_in]\n\n # reflect xcoords and assume xcoords are continuous,\n # e.g. for angles:\n # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)\n xcoords_low = xcoords_nh < 0\n ycoords_nh[xcoords_low] = rows - ycoords_nh[xcoords_low]\n xcoords_nh[xcoords_low] += cols\n xcoords_high = xcoords_nh >= cols\n ycoords_nh[xcoords_high] = rows - ycoords_nh[xcoords_high]\n xcoords_nh[xcoords_high] -= cols\n\n # suppress neighbourhood\n img_max[ycoords_nh, xcoords_nh] = 0\n\n # add current feature to peaks\n img_peaks.append(accum)\n ycoords_peaks.append(ycoords_idx)\n xcoords_peaks.append(xcoords_idx)\n\n img_peaks = np.array(img_peaks)\n ycoords_peaks = np.array(ycoords_peaks)\n xcoords_peaks = np.array(xcoords_peaks)\n\n if num_peaks < len(img_peaks):\n idx_maxsort = np.argsort(img_peaks)[::-1][:num_peaks]\n img_peaks = img_peaks[idx_maxsort]\n ycoords_peaks = ycoords_peaks[idx_maxsort]\n xcoords_peaks = xcoords_peaks[idx_maxsort]\n\n return img_peaks, xcoords_peaks, ycoords_peaks\n" ]
[ [ "numpy.zeros_like", "numpy.ones", "numpy.zeros", "numpy.random.seed", "numpy.testing.assert_array_equal", "numpy.copy", "numpy.random.randn", "numpy.arange", "numpy.random.random", "numpy.max", "numpy.random.normal", "numpy.concatenate" ], [ "numpy.ones", "numpy.random.standard_normal", "numpy.zeros", "numpy.conj", "numpy.abs", "numpy.log", "numpy.prod", "numpy.sqrt", "numpy.iscomplexobj" ], [ "numpy.zeros_like", "numpy.transpose", "numpy.empty", "scipy.ndimage.maximum_filter", "numpy.round", "numpy.diff", "numpy.logical_and", "numpy.argsort", "numpy.column_stack", "numpy.all", "numpy.max", "scipy.ndimage.maximum_filter1d", "numpy.array", "numpy.nonzero", "numpy.unique" ] ]
JHuang-CV/OD
[ "290bf90a5f210199b6a3750c88152f7dd2fbc276" ]
[ "mmdet/models/necks/mscatfpn.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import xavier_init\n\nfrom mmdet.core import auto_fp16\nfrom ..registry import NECKS\nfrom ..utils import ConvModule\nfrom mmdet.ops.context_block import ContextBlock\n\nfrom mmdet.models.plugins.squeeze_excitation import ChannelSELayer\n\n\[email protected]_module\nclass MSCATFPN(nn.Module):\n\n def __init__(self,\n in_channels,\n out_channels,\n num_outs,\n start_level=0,\n end_level=-1,\n add_extra_convs=False,\n extra_convs_on_inputs=True,\n relu_before_extra_convs=False,\n no_norm_on_lateral=False,\n conv_cfg=None,\n norm_cfg=None,\n activation=None):\n super(MSCATFPN, self).__init__()\n assert isinstance(in_channels, list)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_ins = len(in_channels)\n self.num_outs = num_outs\n self.activation = activation\n self.relu_before_extra_convs = relu_before_extra_convs\n self.no_norm_on_lateral = no_norm_on_lateral\n self.norm_cfg = norm_cfg\n self.fp16_enabled = False\n\n self.epsilon = 1e-4\n\n self.se = ChannelSELayer(768)\n\n if end_level == -1:\n self.backbone_end_level = self.num_ins\n assert num_outs >= self.num_ins - start_level\n else:\n # if end_level < inputs, no extra level is allowed\n self.backbone_end_level = end_level\n assert end_level <= len(in_channels)\n assert num_outs == end_level - start_level\n self.start_level = start_level\n self.end_level = end_level\n self.add_extra_convs = add_extra_convs\n self.extra_convs_on_inputs = extra_convs_on_inputs\n\n self.lateral_convs = nn.ModuleList()\n self.fpn_convs = nn.ModuleList()\n self.cat_convs = nn.ModuleList()\n self.add_convs = nn.ModuleList()\n #self.gc_block = nn.ModuleList()\n\n self.relu = nn.ReLU()\n\n self.gc_block1 = ContextBlock(inplanes=256, ratio=1./4.)\n self.gc_block2 = ContextBlock(inplanes=256, ratio=1. 
/ 4.)\n\n self.scat_conv = ConvModule(\n out_channels * (self.backbone_end_level-self.start_level),\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n\n self.c3_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)\n self.c4_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)\n self.c5_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)\n\n for i in range(self.start_level, self.backbone_end_level):\n l_conv = ConvModule(\n in_channels[i],\n out_channels,\n 1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n activation=self.activation,\n inplace=False)\n cat_conv = ConvModule(\n out_channels * (self.backbone_end_level-self.start_level),\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n add_conv = ConvModule(\n out_channels,\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n self.cat_convs.append(cat_conv)\n self.lateral_convs.append(l_conv)\n self.add_convs.append(add_conv)\n\n #self.gc_block.append(ContextBlock(inplanes=256, ratio=1./4.))\n # add extra conv layers (e.g., RetinaNet)\n extra_levels = num_outs - self.backbone_end_level + self.start_level\n if add_extra_convs and extra_levels >= 1:\n for i in range(extra_levels):\n if i == 0 and self.extra_convs_on_inputs:\n in_channels = self.in_channels[self.backbone_end_level - 1]\n else:\n in_channels = out_channels\n extra_fpn_conv = ConvModule(\n in_channels,\n out_channels,\n 3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n self.fpn_convs.append(extra_fpn_conv)\n\n # default init_weights for conv(msra) and norm in ConvModule\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n @auto_fp16()\n def forward(self, inputs):\n assert len(inputs) == len(self.in_channels)\n\n # build laterals\n laterals = [\n lateral_conv(inputs[i + self.start_level])\n for i, lateral_conv in enumerate(self.lateral_convs)\n ]\n\n used_backbone_levels = len(laterals)\n\n mulscale_per_level = []\n for i in range(used_backbone_levels):\n level = []\n m = i - 0\n n = used_backbone_levels - 1 - i\n level.append(laterals[i])\n for x in range(m):\n level.insert(0, F.interpolate(level[0], scale_factor=2, mode='nearest'))\n for y in range(n):\n level.append(F.max_pool2d(level[-1], 2, stride=2))\n mulscale_per_level.append(level)\n sglscale_per_level = list(zip(*mulscale_per_level))\n feat_cat = [torch.cat(scale, 1)for scale in sglscale_per_level]\n #channel_se = [self.se(cat_ft) for cat_ft in feat_cat]\n mcat = [cat_conv(feat_cat[i]) for i, cat_conv in enumerate(self.cat_convs)]\n #outs = [gc(outs[i]) for i, gc in enumerate(self.gc_block)]\n mcat = [self.gc_block1(ft) for ft in mcat]\n\n single_list = []\n level = used_backbone_levels // 2\n\n for i in range(used_backbone_levels):\n if i < level:\n single_list.append(F.max_pool2d(laterals[i], 2, stride=2))\n elif i == level:\n single_list.append(laterals[i])\n else:\n single_list.append(F.interpolate(laterals[i], scale_factor=2, mode='nearest'))\n\n single_cat = torch.cat(single_list, 1)\n single_cat = self.scat_conv(single_cat)\n single_cat = self.gc_block2(single_cat)\n\n m = level - 0\n n = used_backbone_levels - 1 - level\n scat = [single_cat]\n for x in 
range(m):\n scat.insert(0, F.interpolate(scat[0], scale_factor=2, mode='nearest'))\n for y in range(n):\n scat.append(F.max_pool2d(scat[-1], 2, stride=2))\n\n # outs = [scat[i]+lateral for i, lateral in enumerate(laterals)]\n # outs = [add_conv(outs[i]) for i, add_conv in enumerate(self.add_convs)]\n\n outs = []\n for i, (m, s, l) in enumerate(zip(mcat, scat, laterals)):\n outs.append(\n self.add_convs[i](m.sigmoid()*s/2 + l / 2)\n )\n\n if self.num_outs > used_backbone_levels:\n if not self.add_extra_convs:\n for i in range(self.num_outs - used_backbone_levels):\n outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n else:\n if self.extra_convs_on_inputs:\n orig = inputs[self.backbone_end_level - 1]\n outs.append(self.fpn_convs[0](orig))\n else:\n outs.append(self.fpn_convs[0](outs[-1]))\n for i in range(1, self.num_outs-used_backbone_levels):\n if self.relu_before_extra_convs:\n outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n else:\n outs.append(self.fpn_convs[i](outs[-1]))\n return tuple(outs)\n" ]
[ [ "torch.ones", "torch.nn.functional.max_pool2d", "torch.nn.functional.relu", "torch.nn.ModuleList", "torch.nn.ReLU", "torch.cat", "torch.nn.functional.interpolate" ] ]
sjforeman/draco
[ "b0ab40b6984637642b28a5485af1c09c9cf183f2" ]
[ "draco/core/io.py" ]
[ "\"\"\"Tasks for reading and writing data.\n\nTasks\n=====\n\n.. autosummary::\n :toctree:\n\n LoadFiles\n LoadMaps\n LoadFilesFromParams\n Save\n Print\n LoadBeamTransfer\n\nFile Groups\n===========\n\nSeveral tasks accept groups of files as arguments. These are specified in the YAML file as a dictionary like below.\n\n.. code-block:: yaml\n\n list_of_file_groups:\n - tag: first_group # An optional tag naming the group\n files:\n - 'file1.h5'\n - 'file[3-4].h5' # Globs are processed\n - 'file7.h5'\n\n - files: # No tag specified, implicitly gets the tag 'group_2'\n - 'another_file1.h5'\n - 'another_file2.h5'\n\n\n single_group:\n files: ['file1.h5', 'file2.h5']\n\"\"\"\n\nimport os.path\n\nimport h5py\nimport numpy as np\nfrom yaml import dump as yamldump\n\nfrom caput import pipeline\nfrom caput import config\n\nfrom cora.util import units\n\nfrom . import task\nfrom ..util.truncate import bit_truncate_weights, bit_truncate_fixed\nfrom .containers import SiderealStream, TimeStream, TrackBeam\n\n\nTRUNC_SPEC = {\n SiderealStream: {\n \"dataset\": [\"vis\", \"vis_weight\"],\n \"weight_dataset\": [\"vis_weight\", None],\n \"fixed_precision\": 1e-4,\n \"variance_increase\": 1e-3,\n },\n TimeStream: {\n \"dataset\": [\"vis\", \"vis_weight\"],\n \"weight_dataset\": [\"vis_weight\", None],\n \"fixed_precision\": 1e-4,\n \"variance_increase\": 1e-3,\n },\n TrackBeam: {\n \"dataset\": [\"beam\", \"weight\"],\n \"weight_dataset\": [\"weight\", None],\n \"fixed_precision\": 1e-4,\n \"variance_increase\": 1e-3,\n },\n}\n\n\ndef _list_of_filelists(files):\n # Take in a list of lists/glob patterns of filenames\n import glob\n\n f2 = []\n\n for filelist in files:\n\n if isinstance(filelist, str):\n filelist = glob.glob(filelist)\n elif isinstance(filelist, list):\n pass\n else:\n raise Exception(\"Must be list or glob pattern.\")\n f2.append(filelist)\n\n return f2\n\n\ndef _list_or_glob(files):\n # Take in a list of lists/glob patterns of filenames\n import glob\n\n if isinstance(files, str):\n files = sorted(glob.glob(files))\n elif isinstance(files, list):\n pass\n else:\n raise ValueError(\"Argument must be list or glob pattern, got %s\" % repr(files))\n\n return files\n\n\ndef _list_of_filegroups(groups):\n # Process a file group/groups\n import glob\n\n # Convert to list if the group was not included in a list\n if not isinstance(groups, list):\n groups = [groups]\n\n # Iterate over groups, set the tag if needed, and process the file list\n # through glob\n for gi, group in enumerate(groups):\n\n files = group[\"files\"]\n\n if \"tag\" not in group:\n group[\"tag\"] = \"group_%i\" % gi\n\n flist = []\n\n for fname in files:\n flist += glob.glob(fname)\n\n if not len(flist):\n raise RuntimeError(\"No files in group exist (%s).\" % files)\n\n group[\"files\"] = flist\n\n return groups\n\n\nclass LoadMaps(task.MPILoggedTask):\n \"\"\"Load a series of maps from files given in the tasks parameters.\n\n Maps are given as one, or a list of `File Groups` (see\n :mod:`draco.core.io`). Maps within the same group are added together\n before being passed on.\n\n Attributes\n ----------\n maps : list or dict\n A dictionary specifying a file group, or a list of them.\n \"\"\"\n\n maps = config.Property(proptype=_list_of_filegroups)\n\n def next(self):\n \"\"\"Load the groups of maps from disk and pass them on.\n\n Returns\n -------\n map : :class:`containers.Map`\n \"\"\"\n\n from . 
import containers\n\n # Exit this task if we have eaten all the file groups\n if len(self.maps) == 0:\n raise pipeline.PipelineStopIteration\n\n group = self.maps.pop(0)\n\n map_stack = None\n\n # Iterate over all the files in the group, load them into a Map\n # container and add them all together\n for mfile in group[\"files\"]:\n\n self.log.debug(\"Loading file %s\", mfile)\n\n current_map = containers.Map.from_file(mfile, distributed=True)\n current_map.redistribute(\"freq\")\n\n # Start the stack if needed\n if map_stack is None:\n map_stack = current_map\n\n # Otherwise, check that the new map has consistent frequencies,\n # nside and pol and stack up.\n else:\n\n if (current_map.freq != map_stack.freq).all():\n raise RuntimeError(\"Maps do not have consistent frequencies.\")\n\n if (current_map.index_map[\"pol\"] != map_stack.index_map[\"pol\"]).all():\n raise RuntimeError(\"Maps do not have the same polarisations.\")\n\n if (\n current_map.index_map[\"pixel\"] != map_stack.index_map[\"pixel\"]\n ).all():\n raise RuntimeError(\"Maps do not have the same pixelisation.\")\n\n map_stack.map[:] += current_map.map[:]\n\n # Assign a tag to the stack of maps\n map_stack.attrs[\"tag\"] = group[\"tag\"]\n\n return map_stack\n\n\nclass LoadFITSCatalog(task.SingleTask):\n \"\"\"Load an SDSS-style FITS source catalog.\n\n Catalogs are given as one, or a list of `File Groups` (see\n :mod:`draco.core.io`). Catalogs within the same group are combined together\n before being passed on.\n\n Attributes\n ----------\n catalogs : list or dict\n A dictionary specifying a file group, or a list of them.\n z_range : list, optional\n Select only sources with a redshift within the given range.\n freq_range : list, optional\n Select only sources with a 21cm line freq within the given range. Overrides\n `z_range`.\n \"\"\"\n\n catalogs = config.Property(proptype=_list_of_filegroups)\n z_range = config.list_type(type_=float, length=2, default=None)\n freq_range = config.list_type(type_=float, length=2, default=None)\n\n def process(self):\n \"\"\"Load the groups of catalogs from disk, concatenate them and pass them on.\n\n Returns\n -------\n catalog : :class:`containers.SpectroscopicCatalog`\n \"\"\"\n\n from astropy.io import fits\n from . 
import containers\n\n # Exit this task if we have eaten all the file groups\n if len(self.catalogs) == 0:\n raise pipeline.PipelineStopIteration\n\n group = self.catalogs.pop(0)\n\n # Set the redshift selection\n if self.freq_range:\n zl = units.nu21 / self.freq_range[1] - 1\n zh = units.nu21 / self.freq_range[0] - 1\n self.z_range = (zl, zh)\n\n if self.z_range:\n zl, zh = self.z_range\n self.log.info(f\"Applying redshift selection {zl:.2f} <= z <= {zh:.2f}\")\n\n # Load the data only on rank=0 and then broadcast\n if self.comm.rank == 0:\n # Iterate over all the files in the group, load them into a Map\n # container and add them all together\n catalog_stack = []\n for cfile in group[\"files\"]:\n\n self.log.debug(\"Loading file %s\", cfile)\n\n # TODO: read out the weights from the catalogs\n with fits.open(cfile, mode=\"readonly\") as cat:\n pos = np.array([cat[1].data[col] for col in [\"RA\", \"DEC\", \"Z\"]])\n\n # Apply any redshift selection to the objects\n if self.z_range:\n zsel = (pos[2] >= self.z_range[0]) & (pos[2] <= self.z_range[1])\n pos = pos[:, zsel]\n\n catalog_stack.append(pos)\n\n # NOTE: this one is tricky, for some reason the concatenate in here\n # produces a non C contiguous array, so we need to ensure that otherwise\n # the broadcasting will get very confused\n catalog_array = np.concatenate(catalog_stack, axis=-1).astype(np.float64)\n catalog_array = np.ascontiguousarray(catalog_array)\n num_objects = catalog_array.shape[-1]\n else:\n num_objects = None\n catalog_array = None\n\n # Broadcast the size of the catalog to all ranks, create the target array and\n # broadcast into it\n num_objects = self.comm.bcast(num_objects, root=0)\n self.log.debug(f\"Constructing catalog with {num_objects} objects.\")\n\n if self.comm.rank != 0:\n catalog_array = np.zeros((3, num_objects), dtype=np.float64)\n self.comm.Bcast(catalog_array, root=0)\n\n catalog = containers.SpectroscopicCatalog(object_id=num_objects)\n catalog[\"position\"][\"ra\"] = catalog_array[0]\n catalog[\"position\"][\"dec\"] = catalog_array[1]\n catalog[\"redshift\"][\"z\"] = catalog_array[2]\n catalog[\"redshift\"][\"z_error\"] = 0\n\n # Assign a tag to the stack of maps\n catalog.attrs[\"tag\"] = group[\"tag\"]\n\n return catalog\n\n\nclass LoadFilesFromParams(task.SingleTask):\n \"\"\"Load data from files given in the tasks parameters.\n\n Attributes\n ----------\n files : glob pattern, or list\n Can either be a glob pattern, or lists of actual files.\n distributed : bool, optional\n Whether the file should be loaded distributed across ranks.\n convert_strings : bool, optional\n Convert strings to unicode when loading.\n selections : dict, optional\n A dictionary of axis selections. See the section below for details.\n\n Selections\n ----------\n Selections can be given to limit the data read to specified subsets. They can be\n given for any named axis in the container.\n\n Selections can be given as a slice with an `<axis name>_range` key with either\n `[start, stop]` or `[start, stop, step]` as the value. Alternatively a list of\n explicit indices to extract can be given with the `<axis name>_index` key, and\n the value is a list of the indices. If both `<axis name>_range` and `<axis\n name>_index` keys are given the former will take precedence, but you should\n clearly avoid doing this.\n\n Additionally index based selections currently don't work for distributed reads.\n\n Here's an example in the YAML format that the pipeline uses:\n\n .. 
code-block:: yaml\n\n selections:\n freq_range: [256, 512, 4] # A strided slice\n stack_index: [1, 2, 4, 9, 16, 25, 36, 49, 64] # A sparse selection\n stack_range: [1, 14] # Will override the selection above\n \"\"\"\n\n files = config.Property(proptype=_list_or_glob)\n distributed = config.Property(proptype=bool, default=True)\n convert_strings = config.Property(proptype=bool, default=True)\n selections = config.Property(proptype=dict, default=None)\n\n def setup(self):\n \"\"\"Resolve the selections.\"\"\"\n self._sel = self._resolve_sel()\n\n def process(self):\n \"\"\"Load the given files in turn and pass on.\n\n Returns\n -------\n cont : subclass of `memh5.BasicCont`\n \"\"\"\n\n from caput import memh5\n\n # Garbage collect to workaround leaking memory from containers.\n # TODO: find actual source of leak\n import gc\n\n gc.collect()\n\n if len(self.files) == 0:\n raise pipeline.PipelineStopIteration\n\n # Fetch and remove the first item in the list\n file_ = self.files.pop(0)\n\n self.log.info(f\"Loading file {file_}\")\n self.log.debug(f\"Reading with selections: {self._sel}\")\n\n # If we are applying selections we need to dispatch the `from_file` via the\n # correct subclass, rather than relying on the internal detection of the\n # subclass. To minimise the number of files being opened this is only done on\n # rank=0 and is then broadcast\n if self._sel:\n if self.comm.rank == 0:\n with h5py.File(file_, \"r\") as fh:\n clspath = memh5.MemDiskGroup._detect_subclass_path(fh)\n else:\n clspath = None\n clspath = self.comm.bcast(clspath, root=0)\n new_cls = memh5.MemDiskGroup._resolve_subclass(clspath)\n else:\n new_cls = memh5.BasicCont\n\n cont = new_cls.from_file(\n file_,\n distributed=self.distributed,\n comm=self.comm,\n convert_attribute_strings=self.convert_strings,\n convert_dataset_strings=self.convert_strings,\n **self._sel,\n )\n\n if \"tag\" not in cont.attrs:\n # Get the first part of the actual filename and use it as the tag\n tag = os.path.splitext(os.path.basename(file_))[0]\n\n cont.attrs[\"tag\"] = tag\n\n return cont\n\n def _resolve_sel(self):\n # Turn the selection parameters into actual selectable types\n\n sel = {}\n\n sel_parsers = {\"range\": self._parse_range, \"index\": self._parse_index}\n\n # To enforce the precedence of range vs index selections, we rely on the fact\n # that a sort will place the axis_range keys after axis_index keys\n for k in sorted(self.selections or []):\n\n # Parse the key to get the axis name and type, accounting for the fact the\n # axis name may contain an underscore\n *axis, type_ = k.split(\"_\")\n axis_name = \"_\".join(axis)\n\n if type_ not in sel_parsers:\n raise ValueError(\n f'Unsupported selection type \"{type_}\", or invalid key \"{k}\"'\n )\n\n sel[f\"{axis_name}_sel\"] = sel_parsers[type_](self.selections[k])\n\n return sel\n\n def _parse_range(self, x):\n # Parse and validate a range type selection\n\n if not isinstance(x, (list, tuple)) or len(x) > 3 or len(x) < 2:\n raise ValueError(\n f\"Range spec must be a length 2 or 3 list or tuple. Got {x}.\"\n )\n\n for v in x:\n if not isinstance(v, int):\n raise ValueError(f\"All elements of range spec must be ints. Got {x}\")\n\n return slice(*x)\n\n def _parse_index(self, x):\n # Parse and validate an index type selection\n\n if not isinstance(x, (list, tuple)) or len(x) == 0:\n raise ValueError(f\"Index spec must be a non-empty list or tuple. Got {x}.\")\n\n for v in x:\n if not isinstance(v, int):\n raise ValueError(f\"All elements of index spec must be ints. 
Got {x}\")\n\n return list(x)\n\n\n# Define alias for old code\nLoadBasicCont = LoadFilesFromParams\n\n\nclass FindFiles(pipeline.TaskBase):\n \"\"\"Take a glob or list of files specified as a parameter in the\n configuration file and pass on to other tasks.\n\n Parameters\n ----------\n files : list or glob\n \"\"\"\n\n files = config.Property(proptype=_list_or_glob)\n\n def setup(self):\n \"\"\"Return list of files specified in the parameters.\"\"\"\n if not isinstance(self.files, (list, tuple)):\n raise RuntimeError(\"Argument must be list of files.\")\n\n return self.files\n\n\nclass LoadFiles(LoadFilesFromParams):\n \"\"\"Load data from files passed into the setup routine.\n\n File must be a serialised subclass of :class:`memh5.BasicCont`.\n \"\"\"\n\n files = None\n\n def setup(self, files):\n \"\"\"Set the list of files to load.\n\n Parameters\n ----------\n files : list\n \"\"\"\n\n # Call the baseclass setup to resolve any selections\n super().setup()\n\n if not isinstance(files, (list, tuple)):\n raise RuntimeError(f'Argument must be list of files. Got \"{files}\"')\n\n self.files = files\n\n\nclass Save(pipeline.TaskBase):\n \"\"\"Save out the input, and pass it on.\n\n Assumes that the input has a `to_hdf5` method. Appends a *tag* if there is\n a `tag` entry in the attributes, otherwise just uses a count.\n\n Attributes\n ----------\n root : str\n Root of the file name to output to.\n \"\"\"\n\n root = config.Property(proptype=str)\n\n count = 0\n\n def next(self, data):\n \"\"\"Write out the data file.\n\n Assumes it has an MPIDataset interface.\n\n Parameters\n ----------\n data : mpidataset.MPIDataset\n Data to write out.\n \"\"\"\n\n if \"tag\" not in data.attrs:\n tag = self.count\n self.count += 1\n else:\n tag = data.attrs[\"tag\"]\n\n fname = \"%s_%s.h5\" % (self.root, str(tag))\n\n data.to_hdf5(fname)\n\n return data\n\n\nclass Print(pipeline.TaskBase):\n \"\"\"Stupid module which just prints whatever it gets. Good for debugging.\"\"\"\n\n def next(self, input_):\n\n print(input_)\n\n return input_\n\n\nclass LoadBeamTransfer(pipeline.TaskBase):\n \"\"\"Loads a beam transfer manager from disk.\n\n Attributes\n ----------\n product_directory : str\n Path to the saved Beam Transfer products.\n \"\"\"\n\n product_directory = config.Property(proptype=str)\n\n def setup(self):\n \"\"\"Load the beam transfer matrices.\n\n Returns\n -------\n tel : TransitTelescope\n Object describing the telescope.\n bt : BeamTransfer\n BeamTransfer manager.\n feed_info : list, optional\n Optional list providing additional information about each feed.\n \"\"\"\n\n import os\n\n from drift.core import beamtransfer\n\n if not os.path.exists(self.product_directory):\n raise RuntimeError(\"BeamTransfers do not exist.\")\n\n bt = beamtransfer.BeamTransfer(self.product_directory)\n\n tel = bt.telescope\n\n try:\n return tel, bt, tel.feeds\n except AttributeError:\n return tel, bt\n\n\nclass LoadProductManager(pipeline.TaskBase):\n \"\"\"Loads a driftscan product manager from disk.\n\n Attributes\n ----------\n product_directory : str\n Path to the root of the products. 
This is the same as the output\n directory used by ``drift-makeproducts``.\n \"\"\"\n\n product_directory = config.Property(proptype=str)\n\n def setup(self):\n \"\"\"Load the beam transfer matrices.\n\n Returns\n -------\n manager : ProductManager\n Object describing the telescope.\n \"\"\"\n\n import os\n\n from drift.core import manager\n\n if not os.path.exists(self.product_directory):\n raise RuntimeError(\"Products do not exist.\")\n\n # Load ProductManager and Timestream\n pm = manager.ProductManager.from_config(self.product_directory)\n\n return pm\n\n\nclass Truncate(task.SingleTask):\n \"\"\"Precision truncate data prior to saving with bitshuffle compression.\n\n If no configuration is provided, will look for preset values for the\n input container. Any properties defined in the config will override the\n presets.\n\n If available, each specified dataset will be truncated relative to a\n (specified) weight dataset with the truncation increasing the variance up\n to the specified maximum in `variance_increase`. If there is no specified\n weight dataset then the truncation falls back to using the\n `fixed_precision`.\n\n Attributes\n ----------\n dataset : list of str\n Datasets to truncate.\n weight_dataset : list of str\n Datasets to use as inverse variance for truncation precision.\n fixed_precision : float\n Relative precision to truncate to (default 1e-4).\n variance_increase : float\n Maximum fractional increase in variance from numerical truncation.\n \"\"\"\n\n dataset = config.Property(proptype=list, default=None)\n weight_dataset = config.Property(proptype=list, default=None)\n fixed_precision = config.Property(proptype=float, default=None)\n variance_increase = config.Property(proptype=float, default=None)\n\n def _get_params(self, container):\n \"\"\"Load truncation parameters from config or container defaults.\"\"\"\n if container in TRUNC_SPEC:\n self.log.info(\"Truncating from preset for container {}\".format(container))\n for key in [\n \"dataset\",\n \"weight_dataset\",\n \"fixed_precision\",\n \"variance_increase\",\n ]:\n attr = getattr(self, key)\n if attr is None:\n setattr(self, key, TRUNC_SPEC[container][key])\n else:\n self.log.info(\"Overriding container default for '{}'.\".format(key))\n else:\n if (\n self.dataset is None\n or self.fixed_precision is None\n or self.variance_increase is None\n ):\n raise pipeline.PipelineConfigError(\n \"Container {} has no preset values. 
You must define all of 'dataset', \"\n \"'fixed_precision', and 'variance_increase' properties.\".format(\n container\n )\n )\n # Factor of 3 for variance over uniform distribution of truncation errors\n self.variance_increase *= 3\n\n def process(self, data):\n \"\"\"Truncate the incoming data.\n\n The truncation is done *in place*.\n\n Parameters\n ----------\n data : containers.ContainerBase\n Data to truncate.\n\n Returns\n -------\n truncated_data : containers.ContainerBase\n Truncated data.\n \"\"\"\n # get truncation parameters from config or container defaults\n self._get_params(type(data))\n\n if self.weight_dataset is None:\n self.weight_dataset = [None] * len(self.dataset)\n\n for dset, wgt in zip(self.dataset, self.weight_dataset):\n old_shape = data[dset].local_shape\n val = np.ndarray.reshape(data[dset][:], data[dset][:].size)\n if wgt is None:\n if np.iscomplexobj(data[dset]):\n data[dset][:].real = bit_truncate_fixed(\n val.real, self.fixed_precision\n ).reshape(old_shape)\n data[dset][:].imag = bit_truncate_fixed(\n val.imag, self.fixed_precision\n ).reshape(old_shape)\n else:\n data[dset][:] = bit_truncate_fixed(\n val, self.fixed_precision\n ).reshape(old_shape)\n else:\n if data[dset][:].shape != data[wgt][:].shape:\n raise pipeline.PipelineRuntimeError(\n \"Dataset and weight arrays must have same shape ({} != {})\".format(\n data[dset].shape, data[wgt].shape\n )\n )\n invvar = np.ndarray.reshape(data[wgt][:], data[dset][:].size)\n if np.iscomplexobj(data[dset]):\n data[dset][:].real = bit_truncate_weights(\n val.real,\n invvar * 2.0 / self.variance_increase,\n self.fixed_precision,\n ).reshape(old_shape)\n data[dset][:].imag = bit_truncate_weights(\n val.imag,\n invvar * 2.0 / self.variance_increase,\n self.fixed_precision,\n ).reshape(old_shape)\n else:\n data[dset][:] = bit_truncate_weights(\n val, invvar / self.variance_increase, self.fixed_precision\n ).reshape(old_shape)\n\n return data\n\n\nclass SaveModuleVersions(task.SingleTask):\n \"\"\"Write module versions to a YAML file.\n\n The list of modules should be added to the configuration under key 'save_versions'.\n The version strings are written to a YAML file.\n\n Attributes\n ----------\n root : str\n Root of the file name to output to.\n \"\"\"\n\n root = config.Property(proptype=str)\n\n done = True\n\n def setup(self):\n \"\"\"Save module versions.\"\"\"\n\n fname = \"{}_versions.yml\".format(self.root)\n f = open(fname, \"w\")\n f.write(yamldump(self.versions))\n f.close()\n self.done = True\n\n def process(self):\n \"\"\"Do nothing.\"\"\"\n self.done = True\n return\n\n\nclass SaveConfig(task.SingleTask):\n \"\"\"Write pipeline config to a text file.\n\n Yaml configuration document is written to a text file.\n\n Attributes\n ----------\n root : str\n Root of the file name to output to.\n \"\"\"\n\n root = config.Property(proptype=str)\n done = True\n\n def setup(self):\n \"\"\"Save module versions.\"\"\"\n\n fname = \"{}_config.yml\".format(self.root)\n f = open(fname, \"w\")\n f.write(yamldump(self.pipeline_config))\n f.close()\n self.done = True\n\n def process(self):\n \"\"\"Do nothing.\"\"\"\n self.done = True\n return\n\n\ndef get_telescope(obj):\n \"\"\"Return a telescope object out of the input (either `ProductManager`,\n `BeamTransfer` or `TransitTelescope`).\n \"\"\"\n from drift.core import telescope\n\n try:\n return get_beamtransfer(obj).telescope\n except RuntimeError:\n if isinstance(obj, telescope.TransitTelescope):\n return obj\n\n raise RuntimeError(\"Could not get telescope instance 
out of %s\" % repr(obj))\n\n\ndef get_beamtransfer(obj):\n \"\"\"Return a BeamTransfer object out of the input (either `ProductManager`,\n `BeamTransfer`).\n \"\"\"\n from drift.core import manager, beamtransfer\n\n if isinstance(obj, beamtransfer.BeamTransfer):\n return obj\n\n if isinstance(obj, manager.ProductManager):\n return obj.beamtransfer\n\n raise RuntimeError(\"Could not get BeamTransfer instance out of %s\" % repr(obj))\n" ]
[ [ "numpy.zeros", "numpy.concatenate", "numpy.array", "numpy.iscomplexobj", "numpy.ascontiguousarray", "numpy.ndarray.reshape" ] ]
adam-blinzler/simple-lane-detection
[ "8814e0aaf7ac56b7e5be59634e363ca17839effb" ]
[ "original_author_notes/yolo_video.py" ]
[ "# USAGE\n# python yolo_video.py --input videos/airport.mp4 --output output/airport_output.avi --object_detection object_detection-coco\n\n# import the necessary packages\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport os\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", required=True,\n\thelp=\"path to input video\")\nap.add_argument(\"-o\", \"--output\", required=True,\n\thelp=\"path to output video\")\nap.add_argument(\"-y\", \"--object_detection\", required=True,\n\thelp=\"base path to YOLO directory\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\thelp=\"minimum probability to filter weak detections\")\nap.add_argument(\"-t\", \"--threshold\", type=float, default=0.3,\n\thelp=\"threshold when applyong non-maxima suppression\")\nargs = vars(ap.parse_args())\n\n# load the COCO class labels our YOLO model was trained on\nlabelsPath = os.path.sep.join([args[\"object_detection\"], \"coco.names\"])\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n# initialize a list of colors to represent each possible class label\nnp.random.seed(42)\nCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3),\n\tdtype=\"uint8\")\n\n# derive the paths to the YOLO weights and model configuration\nweightsPath = os.path.sep.join([args[\"object_detection\"], \"yolov3.weights\"])\nconfigPath = os.path.sep.join([args[\"object_detection\"], \"yolov3.cfg\"])\n\n# load our YOLO object detector trained on COCO dataset (80 classes)\n# and determine only the *output* layer names that we need from YOLO\nprint(\"[INFO] loading YOLO from disk...\")\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n# initialize the video stream, pointer to output video file, and\n# frame dimensions\nvs = cv2.VideoCapture(args[\"input\"])\nwriter = None\n(W, H) = (None, None)\n\n# try to determine the total number of frames in the video file\ntry:\n\tprop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \\\n\t\telse cv2.CAP_PROP_FRAME_COUNT\n\ttotal = int(vs.get(prop))\n\tprint(\"[INFO] {} total frames in video\".format(total))\n\n# an error occurred while trying to determine the total\n# number of frames in the video file\nexcept:\n\tprint(\"[INFO] could not determine # of frames in video\")\n\tprint(\"[INFO] no approx. 
completion time can be provided\")\n\ttotal = -1\n\n# loop over frames from the video file stream\nwhile True:\n\t# read the next frame from the file\n\t(grabbed, frame) = vs.read()\n\n\t# if the frame was not grabbed, then we have reached the end\n\t# of the stream\n\tif not grabbed:\n\t\tbreak\n\n\t# if the frame dimensions are empty, grab them\n\tif W is None or H is None:\n\t\t(H, W) = frame.shape[:2]\n\n\t# construct a blob from the input frame and then perform a forward\n\t# pass of the YOLO object detector, giving us our bounding boxes\n\t# and associated probabilities\n\tblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),\n\t\tswapRB=True, crop=False)\n\tnet.setInput(blob)\n\tstart = time.time()\n\tlayerOutputs = net.forward(ln)\n\tend = time.time()\n\n\t# initialize our lists of detected bounding boxes, confidences,\n\t# and class IDs, respectively\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\n\t# loop over each of the layer outputs\n\tfor output in layerOutputs:\n\t\t# loop over each of the detections\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability)\n\t\t\t# of the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > args[\"confidence\"]:\n\t\t\t\t# scale the bounding box coordinates back relative to\n\t\t\t\t# the size of the image, keeping in mind that YOLO\n\t\t\t\t# actually returns the center (x, y)-coordinates of\n\t\t\t\t# the bounding box followed by the boxes' width and\n\t\t\t\t# height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top\n\t\t\t\t# and and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates,\n\t\t\t\t# confidences, and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping\n\t# bounding boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, args[\"confidence\"],\n\t\targs[\"threshold\"])\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\t# draw a bounding box rectangle and label on the frame\n\t\t\tcolor = [int(c) for c in COLORS[classIDs[i]]]\n\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n\t\t\ttext = \"{}: {:.4f}\".format(LABELS[classIDs[i]],\n\t\t\t\tconfidences[i])\n\t\t\tcv2.putText(frame, text, (x, y - 5),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\n\t# check if the video writer is None\n\tif writer is None:\n\t\t# initialize our video writer\n\t\tfourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\t\twriter = cv2.VideoWriter(args[\"output\"], fourcc, 30,\n\t\t\t(frame.shape[1], frame.shape[0]), True)\n\n\t\t# some information on processing single frame\n\t\tif total > 0:\n\t\t\telap = (end - start)\n\t\t\tprint(\"[INFO] single frame took {:.4f} seconds\".format(elap))\n\t\t\tprint(\"[INFO] estimated total time to finish: 
{:.4f}\".format(\n\t\t\t\telap * total))\n\n\t# write the output frame to disk\n\twriter.write(frame)\n\n# release the file pointers\nprint(\"[INFO] cleaning up...\")\nwriter.release()\nvs.release()" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.argmax" ] ]
vikashplus/MPL
[ "4a784fd94dc7a5988a1eca85851ee546ca1992f9" ]
[ "MPL/MPL_envs/reach/reach_v0.py" ]
[ "import numpy as np\nfrom gym import utils\nfrom mjrl.envs import mujoco_env\nfrom mujoco_py import MjViewer\nfrom MPL.MPL_robot.robot import Robot\nimport os\n\n# TODO: Action normalization is missing\n\nclass sallyReachEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, noise_scale=0.0):\n\n # prep\n utils.EzPickle.__init__(self)\n self._noise_scale = noise_scale\n self.initializing = True\n curr_dir = os.path.dirname(os.path.abspath(__file__))\n self.Rtarget = 0\n self.Ltarget = 0\n self.Rgrasp = 0\n self.Lgrasp = 0\n \n # acquire robot\n self.mpl = Robot(name='sallyReach', model_file=curr_dir+'/reach_v0.xml', config_file=curr_dir+'/reach_v0.config')\n\n # acquire env\n mujoco_env.MujocoEnv.__init__(self, curr_dir+'/reach_v0.xml', 20)\n self.Rtarget = self.sim.model.site_name2id('Rtarget')\n self.Ltarget = self.sim.model.site_name2id('Ltarget')\n self.Rgrasp = self.sim.model.site_name2id('Rgrasp')\n self.Lgrasp = self.sim.model.site_name2id('Lgrasp')\n \n # env ready\n self.initializing = False\n\n\n def step(self, a):\n\n self.mpl.step(self, a, self.frame_skip*self.sim.model.opt.timestep)\n obs = self.get_obs()\n\n score, reward_dict, solved, done = self._get_score_reward_solved_done(self.obs_dict)\n\n # finalize step\n env_info = {\n 'time': self.obs_dict['t'],\n 'obs_dict': self.obs_dict,\n 'rewards': reward_dict,\n 'score': score,\n 'solved': solved\n }\n return obs, reward_dict['total'], done, env_info\n\n\n # query robot and populate observations\n def get_obs(self):\n\n # ask robot for sensor data\n sen = self.mpl.get_sensors(self, noise_scale=self._noise_scale)\n\n # parse sensor data into obs dict\n self.obs_dict = {}\n self.obs_dict['t'] = sen['time']\n self.obs_dict['Tmpl_pos'] = sen['Tmpl_pos']\n self.obs_dict['Rmpl_pos'] = sen['Rmpl_pos']\n self.obs_dict['Lmpl_pos'] = sen['Lmpl_pos']\n self.obs_dict['Tmpl_vel'] = sen['Tmpl_vel']\n self.obs_dict['Rmpl_vel'] = sen['Rmpl_vel']\n self.obs_dict['Lmpl_vel'] = sen['Lmpl_vel']\n self.obs_dict['Rerr'] = self.sim.data.site_xpos[self.Rtarget]-self.sim.data.site_xpos[self.Rgrasp]\n self.obs_dict['Lerr'] = self.sim.data.site_xpos[self.Ltarget]-self.sim.data.site_xpos[self.Lgrasp]\n\n # vectorize observations\n return np.concatenate([\n self.obs_dict['Tmpl_pos'],\n self.obs_dict['Rmpl_pos'],\n self.obs_dict['Lmpl_pos'],\n self.obs_dict['Tmpl_vel'],\n self.obs_dict['Rmpl_vel'],\n self.obs_dict['Lmpl_vel'],\n self.obs_dict['Lerr'],\n self.obs_dict['Rerr']])\n\n\n # evaluate observations\n def _get_score_reward_solved_done(self, obs, act=None):\n Ldist = np.linalg.norm(obs['Lerr'])\n Rdist = np.linalg.norm(obs['Rerr'])\n\n # print(Rdist, Ldist)\n done = (bool( Ldist > 1.0) or bool(Rdist>1.0)) \\\n if not self.initializing else False\n\n reward_dict = {}\n avg_dist = (Ldist+Rdist)/2.0\n score = -1.* avg_dist\n reward_dict[\"avg_dist\"] = score\n reward_dict[\"small_bonus\"] = 2.0*(Ldist<.1) + 2.0*(Rdist<.1)\n reward_dict[\"big_bonus\"] = 2.0*(Ldist<.1) * 2.0*(Rdist<.1)\n reward_dict[\"total\"] = reward_dict[\"avg_dist\"] + reward_dict[\"small_bonus\"] + reward_dict[\"big_bonus\"] - 50.0 * int(done) \n \n solved = bool(avg_dist<0.100)\n return score, reward_dict, solved, done\n\n\n # reset model\n def reset_model(self):\n raise NotImplementedError # for child class to define \n\n\n # evaluate a path\n def compute_path_rewards(self, paths):\n # path has two keys: observations and actions\n # path[\"observations\"] : (num_traj, horizon, obs_dim)\n # path[\"rewards\"] should have shape (num_traj, horizon)\n obs = 
paths[\"observations\"]\n score, rewards, done = self._get_score_reward_solved_done(obs)\n paths[\"rewards\"] = rewards if rewards.shape[0] > 1 else rewards.ravel()\n\n\n # evaluate policy's success from a collection of paths\n def evaluate_success(self, paths, logger=None):\n success = 0.0\n for p in paths:\n if np.mean(p['env_infos']['solved'][-4:]) > 0.0:\n success += 1.0\n success_rate = 100.0*success/len(paths)\n if logger is None:\n # nowhere to log so return the value\n return success_rate\n else:\n # log the success\n # can log multiple statistics here if needed\n logger.log_kv('success_rate', success_rate)\n return None\n\n # --------------------------------\n # get and set states\n # --------------------------------\n def get_env_state(self):\n return dict(qp=self.data.qpos.copy(), qv=self.data.qvel.copy())\n\n def set_env_state(self, state):\n self.sim.reset()\n qp = state['qp'].copy()\n qv = state['qv'].copy()\n self.set_state(qp, qv)\n self.sim.forward()\n\n # --------------------------------\n # utility functions\n # --------------------------------\n def get_env_infos(self):\n return dict(state=self.get_env_state())\n\n def mj_viewer_setup(self):\n self.viewer = MjViewer(self.sim)\n self.viewer.cam.azimuth = -90\n self.viewer.cam.distance = 2.5\n self.viewer.cam.elevation = -30\n\n self.sim.forward()\n\n def close_env(self):\n pass\n\n\n# Reach at fixed targets\nclass sallyReachEnvFixed(sallyReachEnv):\n def __init__(self):\n super().__init__()\n\n def reset_model(self):\n self.sim.model.site_pos[self.Rtarget] = np.array([0.15, 0.2, .6])\n self.sim.model.site_pos[self.Ltarget] = np.array([-0.15, 0.2, .3])\n self.set_state(self.init_qpos, self.init_qvel)\n self.sim.forward()\n return self.get_obs()\n\n# Reach at random targets\nclass sallyReachEnvRandom(sallyReachEnv):\n def __init__(self):\n super().__init__()\n\n def reset_model(self):\n self.sim.model.site_pos[self.Rtarget] = self.np_random.uniform(high=[0.5, .5, .6], low=[0, .1, .3])\n self.sim.model.site_pos[self.Ltarget] = self.np_random.uniform(high=[0, .5, .6], low=[-.5, .1, .3])\n self.set_state(self.init_qpos, self.init_qvel)\n self.sim.forward()\n return self.get_obs()\n" ]
[ [ "numpy.array", "numpy.concatenate", "numpy.linalg.norm", "numpy.mean" ] ]
ivankreso/semseg
[ "fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79" ]
[ "OLD/losses.py" ]
[ "import tensorflow as tf\nimport slim\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef add_loss_summaries(total_loss):\n \"\"\"Add summaries for losses in model.\n\n Generates moving average for all losses and associated summaries for\n visualizing the performance of the network.\n\n Args:\n total_loss: Total loss from loss().\n Returns:\n loss_averages_op: op for generating moving averages of losses.\n \"\"\"\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n\n for l in losses + [total_loss]:\n #print(l.op.name)\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n #tf.scalar_summary(l.op.name + ' (raw)', l)\n #tf.scalar_summary(l.op.name, loss_averages.average(l))\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n #tf.scalar_summary([l.op.name + ' (raw)'], l)\n #tf.scalar_summary([l.op.name], loss_averages.average(l))\n\n return loss_averages_op\n\n\ndef total_loss_sum(losses):\n # Assemble all of the losses for the current tower only.\n #losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)\n #print(losses)\n # Calculate the total loss for the current tower.\n #regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n #regularization_losses = tf.contrib.losses.get_regularization_losses()\n regularization_losses = tf.losses.get_regularization_losses()\n #total_loss = tf.add_n(losses + regularization_losses, name='total_loss')\n total_loss = tf.add_n(losses + regularization_losses, name='total_loss')\n return total_loss\n\n\ndef cross_entropy_loss(logits, labels):\n print('loss: cross-entropy')\n num_pixels = -1\n with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):\n labels = tf.reshape(labels, shape=[num_pixels])\n logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])\n mask = labels < FLAGS.num_classes\n idx = tf.where(mask)\n # # labels = tf.reshape(labels, shape=[num_pixels])\n # print(idx)\n labels = tf.to_float(labels)\n labels = tf.gather_nd(labels, idx)\n # labels = tf.boolean_mask(labels, mask)\n labels = tf.to_int32(labels)\n logits = tf.gather_nd(logits, idx)\n # logits = tf.boolean_mask(logits, mask)\n\n \n onehot_labels = tf.one_hot(labels, FLAGS.num_classes)\n xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)\n \n # range_idx = tf.range(tf.shape(labels)[0], dtype=tf.int32)\n # print(range_idx, labels)\n # labels = tf.reshape(labels, shape=[-1,1])\n # range_idx = tf.reshape(range_idx, shape=[-1,1])\n # idx = tf.concat([range_idx, labels], axis=1)\n # print(idx)\n # probs = tf.nn.softmax(logits)\n # probs = tf.gather_nd(probs, idx)\n # print(probs)\n # xent = tf.square(1 - probs) * xent\n # # xent = tf.pow(1 - probs, 3) * xent\n # # xent = (1 - probs) * xent\n\n #num_labels = tf.to_float(tf.reduce_sum(num_labels))\n #num_labels = tf.reduce_sum(tf.to_float(num_labels))\n\n #class_hist = tf.Print(class_hist, [class_hist], 'hist = ', summarize=30)\n #num_labels = tf.reduce_sum(onehot_labels)\n\n #class_hist = tf.to_float(tf.reduce_sum(class_hist, axis=0))\n ##num_labels = tf.Print(num_labels, [num_labels, tf.reduce_sum(onehot_labels)], 'lab = ', summarize=30)\n #class_weights = 
num_labels / (class_hist + 1)\n ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n ## we need to append 0 here for ignore pixels\n #class_weights = tf.concat([class_weights, [0]], axis=0)\n ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n #class_weights = tf.minimum(tf.to_float(max_weight), class_weights)\n\n # class_weights = tf.ones([FLAGS.num_classes])\n # class_weights = tf.concat([class_weights, [0]], axis=0)\n # #class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n # weights = tf.gather(class_weights, labels)\n\n xent = tf.reduce_mean(xent)\n return xent\n\ndef weighted_cross_entropy_loss(logits, labels, class_hist=None, max_weight=1):\n print('loss: cross-entropy')\n print('Using balanced loss with max weight = ', max_weight)\n num_pixels = -1\n with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):\n labels = tf.reshape(labels, shape=[num_pixels])\n onehot_labels = tf.one_hot(labels, FLAGS.num_classes)\n logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])\n xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)\n\n #num_labels = tf.to_float(tf.reduce_sum(num_labels))\n #num_labels = tf.reduce_sum(tf.to_float(num_labels))\n\n #class_hist = tf.Print(class_hist, [class_hist], 'hist = ', summarize=30)\n num_labels = tf.reduce_sum(onehot_labels)\n\n #class_hist = tf.to_float(tf.reduce_sum(class_hist, axis=0))\n ##num_labels = tf.Print(num_labels, [num_labels, tf.reduce_sum(onehot_labels)], 'lab = ', summarize=30)\n #class_weights = num_labels / (class_hist + 1)\n ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n ## we need to append 0 here for ignore pixels\n #class_weights = tf.concat([class_weights, [0]], axis=0)\n ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n #class_weights = tf.minimum(tf.to_float(max_weight), class_weights)\n\n class_weights = tf.ones([FLAGS.num_classes])\n class_weights = tf.concat([class_weights, [0]], axis=0)\n #class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)\n weights = tf.gather(class_weights, labels)\n\n if max_weight > 1:\n raise ValueError()\n wgt_sum = tf.reduce_sum(weights)\n norm_factor = num_labels / wgt_sum\n # weights need to sum to 1\n weights = tf.multiply(weights, norm_factor)\n\n xent = tf.multiply(weights, xent)\n #num_labels = tf.Print(num_labels, [num_labels, wgt_sum], 'num_labels = ')\n #xent = tf.Print(xent, [xent], 'num_labels = ')\n xent = tf.reduce_sum(xent) / num_labels\n return xent\n\n\ndef weighted_cross_entropy_loss_dense(logits, labels, weights=None,\n num_labels=None, max_weight=100):\n print('loss: cross-entropy')\n num_pixels = -1\n with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):\n labels = tf.reshape(labels, shape=[num_pixels])\n onehot_labels = tf.one_hot(labels, FLAGS.num_classes)\n logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])\n xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)\n\n if num_labels is None:\n num_labels = tf.reduce_sum(onehot_labels)\n else:\n num_labels = tf.reduce_sum(num_labels)\n\n print('Using balanced loss with max weight = ', max_weight)\n weights = tf.reshape(weights, shape=[num_pixels])\n weights = tf.minimum(tf.to_float(max_weight), weights)\n wgt_sum = tf.reduce_sum(weights)\n norm_factor = num_labels / wgt_sum\n # weights need to sum to 1\n weights = 
tf.multiply(weights, norm_factor)\n xent = tf.multiply(weights, xent)\n\n #num_labels = tf.Print(num_labels, [num_labels, wgt_sum], 'num_labels = ')\n #xent = tf.Print(xent, [xent], 'num_labels = ')\n xent = tf.reduce_sum(xent) / num_labels\n print(xent)\n return xent\n\n\ndef cross_entropy_loss_old(logits, labels, weights, num_labels):\n print('loss: cross-entropy')\n num_pixels = -1\n with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels, num_labels]):\n labels = tf.reshape(labels, shape=[num_pixels])\n onehot_labels = tf.one_hot(labels, FLAGS.num_classes)\n logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])\n xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)\n weights = tf.reshape(weights, shape=[num_pixels])\n xent = tf.multiply(weights, xent)\n xent = tf.reduce_sum(xent) / tf.reduce_sum(num_labels)\n print(xent)\n return xent\n\n\ndef mse(yp, yt):\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n with tf.name_scope('MeanSquareError'):\n yt = tf.reshape(yt, shape=[num_examples])\n yp = tf.reshape(yp, shape=[num_examples])\n return tf.reduce_mean(tf.square(yt - yp))\n\n\n\ndef weighted_cross_entropy_loss_deprecated(logits, labels, weights=None, max_weight=100):\n#def weighted_cross_entropy_loss(logits, labels, weights=None, max_weight=1e2):\n#def weighted_cross_entropy_loss(logits, labels, weights=None, max_weight=1e3):\n print('loss: Weighted Cross Entropy Loss')\n shape = labels.get_shape().as_list()\n print(shape)\n #num_examples = shape[0] * shape[1]\n num_examples = -1\n #num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n with tf.name_scope(None, 'WeightedCrossEntropyLoss', [logits, labels]):\n labels = tf.reshape(labels, shape=[num_examples])\n #num_labels = tf.to_float(tf.reduce_sum(num_labels))\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n # todo\n #log_softmax = tf.log(tf.nn.softmax(logits_1d)) - never do this!\n log_softmax = tf.nn.log_softmax(logits_1d)\n xent = tf.reduce_sum(-tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)\n #weighted_xent = tf.mul(weights, xent)\n if weights != None:\n weights = tf.reshape(weights, shape=[num_examples])\n xent = tf.mul(tf.minimum(tf.to_float(max_weight), weights), xent)\n #weighted_xent = xent\n\n total_loss = tf.div(tf.reduce_sum(xent), tf.to_float(num_labels), name='value')\n print(total_loss)\n return total_loss\n\n\ndef flip_xent_loss(logits, labels, weights, max_weight=10):\n print('Loss: Weighted Cross Entropy Loss')\n assert(FLAGS.batch_size == 2)\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n labels = tf.reshape(labels, shape=[num_examples])\n weights = tf.reshape(weights, shape=[num_examples])\n #num_labels = tf.to_float(tf.reduce_sum(num_labels))\n with tf.name_scope('FlipXentLoss', [logits, labels]):\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))\n #print(logits[].get_shape())\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n # TODO\n #log_softmax = tf.log(tf.nn.softmax(logits_1d))\n log_softmax = tf.nn.log_softmax(logits_1d)\n xent = tf.reduce_sum(tf.mul(tf.to_float(one_hot_labels), 
log_softmax), 1)\n #weighted_xent = tf.mul(weights, xent)\n weighted_xent = tf.mul(tf.minimum(tf.to_float(max_weight), weights), xent)\n #weighted_xent = xent\n\n total_loss = - tf.div(tf.reduce_sum(weighted_xent), num_labels, name='value')\n return total_loss\n\n\n\ndef slim_cross_entropy_loss(logits, labels, num_labels):\n print('Loss: Cross Entropy Loss')\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n xent_loss = slim.losses.cross_entropy_loss(logits_1d, one_hot_labels)\n return xent_loss\n\n\ndef softmax(logits):\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n with tf.op_scope([logits], None, 'Softmax'):\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n softmax_1d = tf.nn.softmax(logits_1d)\n softmax_2d = tf.reshape(softmax_1d, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_classes])\n return softmax_2d\n\n\n\n\ndef multiclass_hinge_loss(logits, labels, weights):\n print('loss: Hinge loss')\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n num_classes = FLAGS.num_classes\n with tf.op_scope([logits, labels], None, 'MulticlassHingeLoss'):\n #logits = tf.reshape(logits, [num_examples, num_classes])\n #labels = tf.reshape(labels, [num_examples])\n #weights = tf.reshape(weights, [num_examples])\n logits = tf.reshape(logits, [-1, num_classes])\n labels = tf.reshape(labels, [-1])\n weights = tf.reshape(weights, [-1])\n select_mask = tf.greater_equal(labels, 0)\n logits = tf.boolean_mask(logits, select_mask)\n labels = tf.boolean_mask(labels, select_mask)\n weights = tf.boolean_mask(weights, select_mask)\n num_examples = tf.reduce_sum(tf.to_int32(select_mask))\n #num_examples = tf.Print(num_examples, [num_examples, num_labels_old], 'num_examples = ')\n #print(labels)\n #print(logits)\n #print(weights)\n #print(select_mask)\n partitions = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0, dtype=tf.int32)\n #print(partitions)\n #one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n #one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n #partitions = tf.to_int32(one_hot_labels)\n\n num_partitions = 2\n scores, score_yt = tf.dynamic_partition(logits, partitions, num_partitions)\n #scores = tf.reshape(scores, [num_examples, num_classes - 1])\n #score_yt = tf.reshape(score_yt, [num_examples, 1])\n scores = tf.reshape(scores, [-1, num_classes - 1])\n score_yt = tf.reshape(score_yt, [-1, 1])\n #print(scores)\n #print(score_yt)\n\n #hinge_loss = tf.maximum(0.0, scores - score_yt + margin)\n hinge_loss = tf.square(tf.maximum(0.0, scores - score_yt + 1.0))\n hinge_loss = tf.reduce_sum(hinge_loss, 1)\n\n #total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))\n #total_loss = tf.div(total_loss, tf.to_float(num_examples), name='value')\n total_loss = tf.reduce_mean(tf.mul(tf.minimum(100.0, weights), hinge_loss))\n\n #tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)\n #tf.nn.l2_loss(t, name=None)\n return total_loss\n\n\ndef metric_hinge_loss(logits, labels, weights, num_labels):\n print('loss: Hinge loss')\n num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n with tf.op_scope([logits, labels], None, 'weightedhingeloss'):\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n #codes = 
tf.nn.softmax(logits_1d)\n codes = tf.nn.l2_normalize(logits_1d, 1)\n # works worse\n # l2 loss -> bad!\n # todo - this is not true svm loss, try it from cs231n\n l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))\n m = 0.2\n #l2_dist = tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1)\n #m = 0.2 ** 2\n #m = 0.1 ** 2\n #m = 0.3 ** 2\n for i in range(num_classes):\n for j in range(num_classes):\n raise valueerror(1)\n hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)\n total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))\n\n total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')\n tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)\n\n #tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)\n #tf.nn.l2_loss(t, name=None)\n return total_loss\n\n#def weighted_hinge_loss(logits, labels, weights, num_labels):\n# print('Loss: Hinge Loss')\n# num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width\n# with tf.op_scope([logits, labels], None, 'WeightedHingeLoss'):\n# weights = tf.reshape(weights, shape=[num_examples])\n# labels = tf.reshape(labels, shape=[num_examples])\n# num_labels = tf.to_float(tf.reduce_sum(num_labels))\n# one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n# one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n# logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n# #codes = tf.nn.softmax(logits_1d)\n# codes = tf.nn.l2_normalize(logits_1d, 1)\n# # works worse\n# #l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))\n# #m = 0.2\n# l2_dist = tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1)\n# m = 0.2 ** 2\n# #m = 0.1 ** 2\n# #m = 0.3 ** 2\n# hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)\n# total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))\n#\n# total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')\n# tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)\n#\n# #tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)\n# #tf.nn.l2_loss(t, name=None)\n# return total_loss\n\ndef flip_xent_loss_symmetric(logits, labels, weights, num_labels):\n print('Loss: Weighted Cross Entropy Loss')\n num_examples = FLAGS.img_height * FLAGS.img_width\n with tf.op_scope([logits, labels], None, 'WeightedCrossEntropyLoss'):\n labels = tf.reshape(labels, shape=[2, num_examples])\n weights = tf.reshape(weights, shape=[2, num_examples])\n num_labels = tf.to_float(tf.reduce_sum(num_labels))\n #num_labels = tf.to_float(num_labels[0])\n logits_flip = logits[1,:,:,:]\n #weights_flip = weights[1,:]\n\n logits = logits[0,:,:,:]\n weights = weights[0,:]\n labels = labels[0,:]\n one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)\n one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])\n\n #logits_orig, logits_flip = tf.split(0, 2, logits)\n logits_flip = tf.image.flip_left_right(logits_flip)\n #print(logits[].get_shape())\n logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])\n logits_1d_flip = tf.reshape(logits_flip, [num_examples, FLAGS.num_classes])\n # TODO\n log_softmax = tf.nn.log_softmax(logits_1d)\n\n #log_softmax_flip = tf.nn.log_softmax(logits_1d_flip)\n softmax_flip = tf.nn.softmax(logits_1d_flip)\n xent = tf.reduce_sum(tf.mul(tf.to_float(one_hot_labels), log_softmax), 1)\n weighted_xent = tf.mul(tf.minimum(tf.to_float(100), weights), xent)\n xent_flip = tf.reduce_sum(tf.mul(softmax_flip, log_softmax), 
1)\n xent_flip = tf.mul(tf.minimum(tf.to_float(100), weights), xent_flip)\n #weighted_xent = tf.mul(weights, xent)\n #weighted_xent = xent\n\n #total_loss = tf.div(- tf.reduce_sum(weighted_xent_flip),\n # num_labels, name='value')\n total_loss = - tf.div(tf.reduce_sum(weighted_xent) + tf.reduce_sum(xent_flip),\n num_labels, name='value')\n\n tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)\n return total_loss\n\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.losses.get_regularization_losses", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.reshape", "tensorflow.gather_nd", "tensorflow.ones", "tensorflow.image.flip_left_right", "tensorflow.name_scope", "tensorflow.one_hot", "tensorflow.concat", "tensorflow.nn.softmax", "tensorflow.reduce_sum", "tensorflow.greater_equal", "tensorflow.train.ExponentialMovingAverage", "tensorflow.minimum", "tensorflow.multiply", "tensorflow.nn.log_softmax", "tensorflow.add_to_collection", "tensorflow.add_n", "tensorflow.op_scope", "tensorflow.get_collection", "tensorflow.to_float", "tensorflow.mul", "tensorflow.dynamic_partition", "tensorflow.boolean_mask", "tensorflow.nn.l2_normalize", "tensorflow.to_int64", "tensorflow.reduce_mean", "tensorflow.to_int32", "tensorflow.where", "tensorflow.square", "tensorflow.gather", "tensorflow.maximum" ] ]
banroku/analySS
[ "15ba9e9216f86a1bf74062eae479a3ce1c9c5a11" ]
[ "drawSS.py" ]
[ "# coding=utf-8\ndef thinningSS(file, max_strain=10, interval=0.1):\n '''a function to conduct data thinning of SS curve at range (0, MAX_STRAIN), with INTERVAL\n This returns np.series of stress with strain in the index. \n FILE should be passed as dictionary containing following: \n 'name': name of sample like 'RL7785'\n 'crv': path(relative) of xxx_crv.csv file\n 'rlt': path(relative) of xxx_rlt.csv file\n 'set': path(relative) of xxx_set.csv file\n '''\n import pandas as pd\n import numpy as np\n \n # read files and parameters\n data = pd.read_csv(file['crv'], sep=',', encoding='shift_jis', skiprows=1, index_col=0)\n data_rlt = pd.read_csv(file['rlt'], sep=',', encoding='shift_jis')\n L = 64 # span\n b = float(data_rlt.iloc[2, 3]) # width of first specimen\n h = float(data_rlt.iloc[2, 4]) # height of first specimen\n #print('span, width, height of first specimen:', L, ',', b, ',', h)#cut out curve of first specimen\n col = ['mm', 'N']\n data = data.reindex(columns=col)\n data.dropna(subset=['mm'], inplace=True)\n \n #%% convert (mm, N) to (%, MPa)\n # sigma = 3*F*L / (2*b*h^2)\n # epsilon = 6*100*s*h / (L^2)\n # F: load, L:span = 64 mm, b:width, h:height, s=strain/mm\n data['strain'] = data['mm'] * 6 * 100 * h / L / L\n data['stress'] = data['N'] * 3 * L / (2 * b * h * h)\n \n #%% data thinnings\n interval_steps = int(max_strain/interval)\n marker = pd.DataFrame({'strain': np.round(np.linspace(0, max_strain, interval_steps, endpoint=False), 2), 'marker': True})\n data_marked = pd.merge(data, marker, on='strain', how='outer')\n data_marked.rename(data_marked['strain'], inplace=True)\n data_marked.sort_values(by=['strain'], inplace=True)\n data_marked.interpolate(method='slinear', limit=1, inplace=True)\n data_marked['marker'].fillna('False', inplace=True)\n data_skipped = data_marked[data_marked['marker']==True]\n thinnedSS = data_skipped['stress']\n thinnedSS.name = file['name']\n \n return thinnedSS\n\n\n#%%\ndef parameters(file):\n '''a function to pick following parameters as pd.Series: \n parameters = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break', \n 'd_width', 'd_height', 'd_FM', 'd_FS_max', 'd_FS_break', 'd_FE_max', 'd_FE_break']\n FILE should be passed as dictionary containing following: \n 'name': name of sample like 'RL7785'\n 'crv': path(relative) of xxx_crv.csv file\n 'rlt': path(relative) of xxx_rlt.csv file\n 'set': path(relative) of xxx_set.csv file '''\n\n file_rlt = file['rlt']\n data_rlt = pd.read_csv(file_rlt, sep=',', skiprows=[1,2], index_col=0, encoding='shift_jis')\n parameters = ['幅', '厚さ', '弾性率', '最大点', '破壊点', '最大点.1', '破壊点.1']\n data_rlt = data_rlt.loc[['単純平均', '標準偏差'], parameters]\n data_rlt.index = ['average', 'stdev']\n data_rlt.columns = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break']\n data_rlt = data_rlt.values\n data_flattened = [item for sublist in data_rlt for item in sublist] #see below\n parameters = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break', \n 'd_width', 'd_height', 'd_FM', 'd_FS_max', 'd_FS_break', 'd_FE_max', 'd_FE_break']\n data_rlt = pd.Series(data_flattened, index=parameters) \n data_rlt.name = file['name']\n \n return data_rlt" ]
[ [ "pandas.read_csv", "numpy.linspace", "pandas.Series", "pandas.merge" ] ]
ExplosiveJam/fickettmodel-reproducibility
[ "e47af1d3e2513d35dad65c16d4fd68c23e505f87" ]
[ "bifurcation-diagram/run.py" ]
[ "#!/usr/bin/env python\nr\"\"\" Run many simulations with varying :math:`\\theta`.\n\nThe simulations are run.\nSeparate script should plot bifurcation diagram.\n\n\"\"\"\nimport argparse\nimport os\nimport sys\nimport shutil\n\nimport numpy as np\n\nfrom mpi4py import MPI\n\nfrom saf.fm.nonlinear import Config\nfrom saf.action import solve\nfrom saf.util import reset_logging\n\nTOTAL_THETAS = 251\nFINAL_TIME = 1000\nQ = 4\nIO_FORMAT = 'numpy'\n\n# Format for floating-point numbers.\nFMT = '.3f'\n\n\ndef _worker(tasks, rank):\n for t in tasks:\n _worker_single_task(t, rank)\n\n\ndef _worker_single_task(task, rank):\n theta = task\n worker_name = rank\n\n try:\n outdir = 'theta={:{fmt}}'.format(theta, fmt=FMT)\n outdir = os.path.join(OUTPUT_DIR, outdir)\n\n if os.path.exists(outdir):\n shutil.rmtree(outdir)\n os.mkdir(outdir)\n outname = os.path.join(outdir, 'stdout.log')\n errname = os.path.join(outdir, 'stderr.log')\n sys.stdout = open(outname, 'w')\n sys.stderr = open(errname, 'w')\n msg = 'Worker {} | theta={:{fmt}}'.format(worker_name, theta, fmt=FMT)\n print(msg)\n except Exception as e:\n print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))\n return\n\n try:\n c = _get_config(theta)\n solve('nonlinear', c, outdir, log_to_file=False)\n reset_logging()\n except Exception as e:\n print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))\n sys.stdout = sys.__stdout__\n print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))\n\n\ndef _get_config(theta):\n c = Config()\n\n c.n12 = N12\n c.final_time = FINAL_TIME\n c.dt = 0.005\n c.approximator = 'godunov-minmod'\n c.time_integrator = 'dopri5'\n c.plot_time_step = 0\n c.io_format = IO_FORMAT\n c.play_animation = False\n\n c.lambda_tol = 1e-6\n c.q = Q\n c.theta = theta\n c.reaction_rate_version = 'v2' # Expression exactly as in FariaEtAl2015.\n c.f = 1\n c.ic_amplitude = 0.0\n c.ic_type = 'gaussian'\n c.truncation_coef = 1e6\n\n return c\n\n\np = argparse.ArgumentParser()\np.add_argument('N12', help='Resolution', type=int)\nargs = p.parse_args()\nN12 = args.N12\nOUTPUT_DIR = os.path.join('_output', 'N12={:04d}'.format(N12))\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nall_tasks = []\n\n# Build `all_tasks` in master process to distribute it to all processes.\nif rank == 0:\n # Uniformly spaced values of :math:`\\theta`.\n theta_values = np.linspace(0.90, 1.15, num=TOTAL_THETAS)\n\n for i in range(size):\n all_tasks.append([])\n\n for i in range(len(theta_values)):\n all_tasks[i % size].append(theta_values[i])\n\n# Now distribute the tasks to each process.\ntasks = comm.scatter(all_tasks, root=0)\n_worker(tasks, rank)\n" ]
[ [ "numpy.linspace" ] ]
ec-better/ewf-ethz-03-01-01
[ "5ca616e5c25bbba29013a7de248af4b69757921b" ]
[ "src/main/app-resources/notebook/libexec/helpers.py" ]
[ "# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport cv2\nimport re\n\nfrom shapely import wkt\nfrom shapely.geometry import box, Polygon\nimport pandas as pd\nimport geopandas as gpd\n\nfrom osgeo import gdal, gdalnumeric, osr, ogr\n\ndef ensure_dir(file_path):\n directory = os.path.dirname(file_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\ndef getResolution(demFolder, return_full_paths = False):\n rasterFilePaths = [f for f in os.listdir(demFolder) if os.path.isfile(os.path.join(demFolder, f))]\n \n if return_full_paths:\n rasterFilePaths = [demFolder + '/' + f for f in rasterFilePaths if f[:4] == 'DEM_' and f[-4:] == '.tif']\n rasterFilePaths.sort(reverse=True)\n else:\n rasterFilePaths = [int(f[4:-4]) for f in rasterFilePaths if f[:4] == 'DEM_' and f[-4:] == '.tif']\n\n return rasterFilePaths\n\ndef readGDAL2numpy(rasterPath, return_geoInformation = False):\n try:\n ds = gdal.Open(rasterPath)\n except RuntimeError:\n print('Unable to open input file')\n sys.exit(1)\n \n data = gdalnumeric.LoadFile(rasterPath, False)\n noDataVal = ds.GetRasterBand(1).GetNoDataValue()\n try:\n if data.dtype in ['float16', 'float32', 'float64'] and noDataVal is not None:\n data[data == noDataVal] = np.NaN\n except:\n print(\"Issue in no data value\")\n \n \n if return_geoInformation == False:\n return data\n else:\n geoTransform = ds.GetGeoTransform()\n projection = ds.GetProjection() \n return data, geoTransform, projection\n\ndef writeNumpyArr2Geotiff(outputPath, data, geoTransform = None, projection = None, GDAL_dtype = gdal.GDT_Byte, noDataValue = None):\n nscn, npix = data.shape\n \n if np.isnan(data).any() and noDataValue is not None:\n data[np.isnan(data)] = noDataValue\n \n ds_new = gdal.GetDriverByName('GTiff').Create(outputPath, npix, nscn, 1, GDAL_dtype)\n \n if geoTransform != None:\n ds_new.SetGeoTransform(geoTransform)\n \n if projection != None:\n ds_new.SetProjection(projection) \n \n outBand = ds_new.GetRasterBand(1)\n outBand.WriteArray(data)\n \n if noDataValue != None:\n ds_new.GetRasterBand(1).SetNoDataValue(noDataValue)\n \n # Close dataset\n ds_new.FlushCache()\n ds_new = None\n outBand = None\n \ndef writeNumpyArr2Saga(outputPath, data, geoTransform = None, projection = None, GDAL_dtype = gdal.GDT_Byte, noDataValue = None):\n nscn, npix = data.shape\n \n if np.isnan(data).any() and noDataValue is not None:\n data[np.isnan(data)] = noDataValue\n \n ds_new = gdal.GetDriverByName('SAGA').Create(outputPath, npix, nscn, 1, GDAL_dtype) \n \n outBand = ds_new.GetRasterBand(1)\n outBand.WriteArray(data)\n \n if noDataValue != None:\n ds_new.GetRasterBand(1).SetNoDataValue(noDataValue)\n \n if projection != None:\n ds_new.SetProjection(projection) \n \n # Close dataset\n ds_new.FlushCache()\n ds_new = None\n outBand = None\n\ndef wkt2bbox(wkt_input):\n wkt_geometry = wkt.loads(wkt_input)\n minx, miny, maxx, maxy = wkt_geometry.bounds\n b = box(minx, miny, maxx, maxy)\n bbox_tuple = list(b.exterior.coords)\n bbox = []\n for point in bbox_tuple:\n bbox.append([point[0],point[1]])\n return bbox\n\ndef wkt2shp(wkt_input, target_epsg, dst_file, bbox=False):\n ensure_dir(dst_file)\n if bbox:\n polygon = Polygon(wkt2bbox(wkt_input))\n else:\n polygon = wkt.loads(wkt_input)\n gpd.GeoDataFrame(pd.DataFrame(['p1'], columns = ['geom']),\n crs = {'init':'epsg:' + str(target_epsg)},\n geometry = [polygon]).to_file(dst_file)\n \ndef rescaleDEM(image, noData = None, maxVal = 255):\n if noData:\n image = np.float32(image)\n image[image == noData] = np.nan\n \n minElev = 
np.nanmin(image)\n maxElev = np.nanmax(image)\n \n rescaled = ( ((image - minElev)/(maxElev- minElev)) * (maxVal - 1) ) + 1\n return np.uint8(rescaled)\n\ndef joinStrArg(str1, str2, str3 = None):\n if str3 is not None:\n return str(str1) + ' ' + str(str2) + ' ' + str(str3)\n else:\n return str(str1) + ' ' + str(str2) \n\ndef wkt2EPSG(wkt, epsg='/usr/local/share/proj/epsg', forceProj4=False):\n ''' \n Transform a WKT string to an EPSG code\n \n Arguments\n ---------\n \n wkt: WKT definition\n epsg: the proj.4 epsg file (defaults to '/usr/local/share/proj/epsg')\n forceProj4: whether to perform brute force proj4 epsg file check (last resort)\n \n Returns: EPSG code\n \n '''\n code = None\n p_in = osr.SpatialReference()\n s = p_in.ImportFromWkt(wkt)\n if s == 5: # invalid WKT\n return None\n if p_in.IsLocal() == 1: # this is a local definition\n return p_in.ExportToWkt()\n if p_in.IsGeographic() == 1: # this is a geographic srs\n cstype = 'GEOGCS'\n else: # this is a projected srs\n cstype = 'PROJCS'\n an = p_in.GetAuthorityName(cstype)\n ac = p_in.GetAuthorityCode(cstype)\n if an is not None and ac is not None: # return the EPSG code\n return '%s:%s' % \\\n (p_in.GetAuthorityName(cstype), p_in.GetAuthorityCode(cstype))\n else: # try brute force approach by grokking proj epsg definition file\n p_out = p_in.ExportToProj4()\n if p_out:\n if forceProj4 is True:\n return p_out\n f = open(epsg)\n for line in f:\n if line.find(p_out) != -1:\n m = re.search('<(\\\\d+)>', line)\n if m:\n code = m.group(1)\n break\n if code: # match\n return 'EPSG:%s' % code\n else: # no match\n return None\n else:\n return None\n \ndef getCornerCoordinates(gdal_dataSet, target_srs = False):\n \"\"\"\n :param gdal_dataSet: /path/to/file OR gdal dataset\n :param target_srs: False for output coordinates in same coordinate system OR 'wgs84' for lat long values OR custom osr.SpatialReference() object\n :return: list of corner coordinates\n\n --0--------3--\n | |\n | | <--- Index of coordinates returned in list\n | |\n --1--------2--\n \"\"\"\n\n\n if type(gdal_dataSet) is str:\n gdal_dataSet = gdal.Open(gdal_dataSet)\n\n gt=gdal_dataSet.GetGeoTransform() # gt = [ulx, xres, xskew, uly, yskew, yres]\n cols = gdal_dataSet.RasterXSize\n rows = gdal_dataSet.RasterYSize\n\n def GetExtent(gt,cols,rows):\n ''' Return list of corner coordinates from a geotransform\n @type gt: C{tuple/list}\n @param gt: geotransform\n @type cols: C{int}\n @param cols: number of columns in the dataset\n @type rows: C{int}\n @param rows: number of rows in the dataset\n @rtype: C{[float,...,float]}\n @return: coordinates of each corner\n '''\n ext=[]\n xarr=[0,cols]\n yarr=[0,rows]\n\n for px in xarr:\n for py in yarr:\n x=gt[0]+(px*gt[1])+(py*gt[2])\n y=gt[3]+(px*gt[4])+(py*gt[5])\n ext.append([x,y])\n #print(x,y)\n yarr.reverse()\n return ext\n\n def ReprojectCoords(coords,src_srs,tgt_srs):\n ''' Reproject a list of x,y coordinates.\n\n @type geom: C{tuple/list}\n @param geom: List of [[x,y],...[x,y]] coordinates\n @type src_srs: C{osr.SpatialReference}\n @param src_srs: OSR SpatialReference object\n @type tgt_srs: C{osr.SpatialReference}\n @param tgt_srs: OSR SpatialReference object\n @rtype: C{tuple/list}\n @return: List of transformed [[x,y],...[x,y]] coordinates\n '''\n trans_coords=[]\n transform = osr.CoordinateTransformation( src_srs, tgt_srs)\n for x,y in coords:\n x,y,z = transform.TransformPoint(x,y)\n trans_coords.append([x,y])\n return trans_coords\n\n ext = GetExtent(gt,cols,rows)\n\n src_srs=osr.SpatialReference()\n 
src_srs.ImportFromWkt(gdal_dataSet.GetProjection())\n\n if target_srs == False:\n return ext\n elif target_srs == 'wgs84':\n #target_srs = src_srs.CloneGeogCS()\n #\n target_srs=osr.SpatialReference()\n target_srs.ImportFromEPSG(4326)\n\n return ReprojectCoords(ext,src_srs,target_srs)\n\ndef resizeToDEM(imPath, sizeDEM = None, geoTransform = None, projection = None, noData = None):\n imDS = gdal.Open(imPath, gdal.GA_ReadOnly)\n imPix = imDS.RasterXSize\n imScn = imDS.RasterYSize\n \n nscn, npix = sizeDEM\n \n if sizeDEM is not None:\n if nscn != imScn or npix != imPix:\n print(\"Size Mismatch\")\n image = imDS.ReadAsArray()\n if noData is not None:\n image = np.float32(image)\n image[image == noData] = np.nan\n imNew = cv2.resize(image, (npix, nscn), interpolation=cv2.INTER_CUBIC)\n \n writeNumpyArr2Geotiff(imPath, imNew, geoTransform = geoTransform, projection = projection, GDAL_dtype = gdal.GDT_UInt16, noDataValue = noData)\n \ndef map_uint16_to_uint8(img, lower_bound=None, upper_bound=None):\n '''\n Map a 16-bit image trough a lookup table to convert it to 8-bit.\n\n '''\n if not(0 <= lower_bound < 2**16) and lower_bound is not None:\n raise ValueError(\n '\"lower_bound\" must be in the range [0, 65535]')\n if not(0 <= upper_bound < 2**16) and upper_bound is not None:\n raise ValueError(\n '\"upper_bound\" must be in the range [0, 65535]')\n if lower_bound is None:\n lower_bound = np.min(img)\n if upper_bound is None:\n upper_bound = np.max(img)\n if lower_bound >= upper_bound:\n raise ValueError(\n '\"lower_bound\" must be smaller than \"upper_bound\"')\n lut = np.concatenate([\n np.zeros(lower_bound, dtype=np.uint16),\n np.linspace(0, 255, upper_bound - lower_bound).astype(np.uint16),\n np.ones(2**16 - upper_bound, dtype=np.uint16) * 255\n ])\n return lut[img].astype(np.uint8) \n\ndef closeCV(mask, kernelSize = 11):\n kernel = np.ones((kernelSize, kernelSize),np.uint8)\n return cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n\ndef newGeoTransform(geoTransform, maskBounds):\n\tnewGeoTransform = (geoTransform[0]+ maskBounds['xMin'] * geoTransform[1],\n geoTransform[1],\n geoTransform[2],\n geoTransform[3] + maskBounds['yMin'] * geoTransform[5],\n geoTransform[4],\n geoTransform[5]) \n\treturn newGeoTransform\n\ndef shrinkGeoTransform(geoTransform, factor):\n\tnewGeoTransform = (geoTransform[0],\n geoTransform[1] / factor,\n geoTransform[2],\n geoTransform[3],\n geoTransform[4],\n geoTransform[5] / factor) \n\treturn newGeoTransform\n" ]
[ [ "numpy.ones", "numpy.nanmax", "numpy.zeros", "pandas.DataFrame", "numpy.float32", "numpy.nanmin", "numpy.max", "numpy.min", "numpy.isnan", "numpy.linspace", "numpy.uint8" ] ]
eivindeb/gym-letMPC
[ "7041aa56a25aa9a1c749088f2b370c910d21fe75" ]
[ "gym_let_mpc/let_mpc.py" ]
[ "import gym\nfrom gym.utils import seeding\nimport numpy as np\nimport json\nfrom gym_let_mpc.simulator import ControlSystem\nfrom gym_let_mpc.controllers import ETMPC, AHMPC\nimport collections.abc\nimport matplotlib.pyplot as plt\nfrom gym_let_mpc.utils import str_replace_whole_words\nimport copy\n\n\nclass LetMPCEnv(gym.Env):\n def __init__(self, config_path):\n with open(config_path) as file_object:\n config = json.load(file_object)\n\n if config[\"mpc\"][\"model\"] == \"plant\":\n config[\"mpc\"][\"model\"] = copy.deepcopy(config[\"plant\"][\"model\"])\n elif config[\"mpc\"][\"model\"].get(\"parameters\", None) == \"plant\":\n config[\"mpc\"][\"model\"][\"parameters\"] = copy.deepcopy(config[\"plant\"][\"model\"][\"parameters\"])\n\n if config[\"lqr\"][\"model\"] == \"plant\":\n config[\"lqr\"][\"model\"] = copy.deepcopy(config[\"plant\"][\"model\"])\n elif config[\"lqr\"][\"model\"] == \"mpc\":\n config[\"lqr\"][\"model\"] = copy.deepcopy(config[\"mpc\"][\"model\"])\n elif config[\"lqr\"][\"model\"].get(\"parameters\", None) == \"plant\":\n config[\"lqr\"][\"model\"][\"parameters\"] = copy.deepcopy(config[\"plant\"][\"model\"][\"parameters\"])\n elif config[\"lqr\"][\"model\"].get(\"parameters\", None) == \"mpc\":\n config[\"lqr\"][\"model\"][\"parameters\"] = copy.deepcopy(config[\"mpc\"][\"model\"][\"parameters\"])\n\n self.config = config\n assert \"max_steps\" in self.config[\"environment\"]\n self.max_steps = self.config[\"environment\"][\"max_steps\"]\n\n assert \"randomize\" in self.config[\"environment\"]\n assert \"state\" in self.config[\"environment\"][\"randomize\"] and \"reference\" in self.config[\"environment\"][\"randomize\"]\n assert \"render\" in self.config[\"environment\"]\n if config[\"mpc\"][\"type\"] == \"ETMPC\":\n assert len(config[\"environment\"][\"action\"][\"variables\"]) == 1 and \\\n config[\"environment\"][\"action\"][\"variables\"][0][\"name\"] == \"mpc_compute\"\n controller = ETMPC(config[\"mpc\"], config[\"lqr\"])\n self.action_space = gym.spaces.Discrete(2)\n elif config[\"mpc\"][\"type\"] == \"AHMPC\":\n assert len(config[\"environment\"][\"action\"][\"variables\"]) == 1 and \\\n config[\"environment\"][\"action\"][\"variables\"][0][\"name\"] == \"mpc_horizon\"\n controller = AHMPC(config[\"mpc\"])\n self.action_space = gym.spaces.Box(low=np.array([1]), high=np.array([50]), dtype=np.float32)\n else:\n raise ValueError\n self.control_system = ControlSystem(config[\"plant\"], controller=controller)\n self.history = None\n self.steps_count = None\n self.np_random = None\n self.min_constraint_delta = 0.25 # TODO: how and where to set\n\n obs_high = []\n obs_low = []\n for obs_var in self.config[\"environment\"][\"observation\"][\"variables\"]:\n for var_transform in obs_var.get(\"transform\", [\"none\"]):\n for lim_i, lim in enumerate(obs_var.get(\"limits\", [None, None])):\n if lim is None:\n if lim_i == 0:\n obs_low.append(-np.finfo(np.float32).max)\n else:\n obs_high.append(np.finfo(np.float32).max)\n else:\n if var_transform == \"none\":\n if lim_i == 0:\n obs_low.append(lim)\n else:\n obs_high.append(lim)\n elif var_transform == \"absolute\":\n if lim_i == 0:\n obs_low.append(0)\n else:\n obs_high.append(lim)\n elif var_transform == \"square\":\n if lim_i == 0:\n obs_low.append(0)\n else:\n obs_high.append(lim ** 2)\n else:\n raise NotImplementedError\n self.observation_space = gym.spaces.Box(low=np.array(obs_low, dtype=np.float32),\n high=np.array(obs_high, dtype=np.float32),\n dtype=np.float32)\n\n self.value_function_is_set = False\n\n 
self.viewer = None\n\n def seed(self, seed=None):\n \"\"\"\n Seed the random number generator of the control system.\n :param seed: (int) seed for random state\n \"\"\"\n self.np_random, seed = gym.utils.seeding.np_random(seed)\n self.control_system.seed(seed)\n return [seed]\n\n def reset(self, state=None, reference=None, constraint=None, model=None, process_noise=None, tvp=None):\n \"\"\"\n Reset state of environment. Note that the simulator is reset, the MPC solution is computed and the first\n MPC action is applied to the plant.\n\n :param state: (dict) initial conditions (value) for state name (key).\n :param reference: (dict) reference value (value) for reference name (key).\n :param constraint: (dict) constraint values (value) for constraint names (key).\n :param model: (dict) dictionary of dictionary where first key is model that it applies to [\"plant\", \"mpc\", \"lqr\"],\n first value is dictionary of model parameters where second value is the specified model parameter value.\n :param process_noise: (dict) process noise values (value) as ndarray for state name (key). The process noise at\n each time step loops through the provided array.\n :param tvp: (dict) values of time-varying parameters. New values are generated if values arent specified\n for all time steps elapsed.\n :return: ([float]) observation vector\n \"\"\"\n def update_dict_recursively(d, u):\n for k, v in u.items():\n if isinstance(v, collections.abc.Mapping):\n d[k] = update_dict_recursively(d.get(k, {}), v)\n else:\n d[k] = v\n return d\n\n sampled_state = self.sample_state()\n sampled_reference = self.sample_reference()\n sampled_constraint = self.sample_constraints()\n sampled_model = self.sample_model()\n\n if state is not None:\n sampled_state.update(state)\n elif len(sampled_state) == 0:\n sampled_state = None\n if reference is not None:\n sampled_reference.update(reference)\n elif len(sampled_reference) == 0:\n sampled_reference = None\n if constraint is not None:\n sampled_constraint.update(constraint)\n elif len(sampled_constraint) == 0:\n sampled_constraint = None\n if model is not None:\n sampled_model = update_dict_recursively(sampled_model, model)\n elif len(sampled_model) == 0:\n sampled_model = None\n self.control_system.reset(state=sampled_state, reference=sampled_reference, constraint=sampled_constraint,\n model=sampled_model, process_noise=process_noise, tvp=tvp)\n if self.config[\"mpc\"][\"type\"] == \"ETMPC\":\n self.control_system.step(action=np.array([1]))\n obs = self.get_observation()\n self.history = {\"obs\": [obs], \"actions\": [], \"rewards\": []}\n self.steps_count = 0\n\n return obs\n\n def step(self, action):\n a_dict = {a_props[\"name\"]: action[a_i]\n for a_i, a_props in enumerate(self.config[\"environment\"][\"action\"][\"variables\"])}\n\n self.control_system.step(np.round(a_dict[\"mpc_horizon\"]).astype(np.int32))#np.atleast_1d(int(a_dict[\"mpc_compute\"])))\n self.history[\"actions\"].append(a_dict)\n self.steps_count += 1\n\n info = {}\n obs = self.get_observation()\n done = False\n if self.steps_count >= self.max_steps:\n done = True\n info[\"termination\"] = \"steps\"\n elif len(self.config[\"environment\"].get(\"end_on_constraint_violation\", [])) > 0:\n for c_name, c_d in self.control_system.get_constraint_distances().items():\n if c_name.split(\"-\")[1] in self.config[\"environment\"][\"end_on_constraint_violation\"] and c_d > 0:\n done = True\n info[\"termination\"] = \"constraint\"\n break\n\n rew = self.get_reward(done=done)\n for category, v in 
self.config[\"environment\"].get(\"info\", {}).items():\n if category == \"reward\":\n for rew_name, rew_expr in v.items():\n info[\"reward/{}\".format(rew_name)] = self.get_reward(rew_expr, done=done)\n else:\n raise NotImplementedError\n\n if self.value_function_is_set:\n step_vf_data = {\"mpc_state\": self.control_system.get_state_vector(self.control_system.history[\"state\"][-2]),\n \"mpc_next_state\": self.control_system.controller.mpc_state_preds[:, -1, -1]}\n step_vf_data[\"mpc_n_horizon\"] = self.control_system.controller.history[\"mpc_horizon\"][-1]\n info[\"mpc_value_fn\"] = (self.control_system.controller.value_function.eval([step_vf_data[\"mpc_next_state\"].reshape(1, -1)])[0][0, 0]).astype(np.float64)\n step_vf_data[\"mpc_rewards\"] = self.control_system.controller.mpc.opt_f_num.toarray()[0, 0] - \\\n self.config[\"mpc\"][\"objective\"].get(\"discount_factor\") ** (step_vf_data[\"mpc_n_horizon\"] + 1) * info[\"mpc_value_fn\"]\n info[\"mpc_computation_time\"] = sum([v for k, v in self.control_system.controller.mpc.solver_stats.items() if k.startswith(\"t_proc\")])\n info[\"data\"] = step_vf_data\n info[\"mpc_avg_stage_cost\"] = step_vf_data[\"mpc_rewards\"] / step_vf_data[\"mpc_n_horizon\"]\n\n info.update({k: v.astype(np.float64) if hasattr(v, \"dtype\") else v for k, v in a_dict.items()})\n\n self.history[\"obs\"].append(obs)\n self.history[\"rewards\"].append(rew)\n\n return obs, rew, done, info\n\n def render(self, mode='human', save_path=None): # TODO: add env renders\n figure, axes = None, None\n if self.viewer is None:\n env_plots = [plot_name for plot_name, make_plot in self.config[\"environment\"][\"render\"].items() if make_plot]\n if len(env_plots) > 0:\n figure, axes = plt.subplots(self.control_system.render_n_axes + len(env_plots), sharex=True,\n figsize=(9, 16))\n self.viewer = self.control_system.render(figure=figure, axes=axes, return_viewer=True)\n for i, plot in enumerate(env_plots):\n self.viewer[\"axes\"][plot] = axes[-(i + 1)]\n else:\n self.viewer = self.control_system.render(figure=figure, axes=axes, return_viewer=True)\n for plot_name, make_plot in self.config[\"environment\"][\"render\"].items():\n if make_plot:\n self.viewer[\"axes\"][plot_name].set_ylabel(\"-\".join(plot_name.split(\"_\")[1:]))\n x_data = np.array(range(self.steps_count)) * self.control_system.config[\"params\"][\"t_step\"]\n self.viewer[\"axes\"][plot_name].clear()\n if plot_name == \"plot_action\":\n for a_var in self.config[\"environment\"][\"action\"][\"variables\"]:\n y_data = [step_a[a_var[\"name\"]] for step_a in self.history[\"actions\"]]\n self.viewer[\"axes\"][plot_name].plot(x_data, y_data, label=a_var[\"name\"], drawstyle=\"steps\")\n elif plot_name == \"plot_reward\":\n self.viewer[\"axes\"][plot_name].plot(x_data, self.history[\"rewards\"], label=\"reward\")\n self.viewer[\"axes\"][plot_name].text(max(x_data) + self.control_system.config[\"params\"][\"t_step\"],\n self.history[\"rewards\"][-1],\n \"{:.3f}\".format(np.sum(self.history[\"rewards\"])))\n else:\n raise ValueError\n for axis in self.viewer[\"axes\"].values():\n axis.legend()\n if save_path is not None:\n self.viewer[\"figure\"].savefig(save_path, bbox_inches=\"tight\", format=\"png\")\n plt.close(self.viewer[\"figure\"])\n else:\n self.viewer[\"figure\"].show()\n\n def get_observation(self):\n obs = []\n for var in self.config[\"environment\"][\"observation\"][\"variables\"]:\n var_val = self._get_variable_value(var)\n for transform in var.get(\"transform\", [\"none\"]):\n if transform == \"none\":\n 
obs.append(var_val)\n elif transform == \"absolute\":\n obs.append(abs(var_val))\n elif transform == \"square\":\n obs.append(var_val ** 2)\n else:\n raise ValueError\n\n return np.array(obs)\n\n def get_reward(self, rew_expr=None, done=False):\n if rew_expr is None:\n rew_expr = self.config[\"environment\"][\"reward\"][\"expression\"]\n\n rew_expr = str_replace_whole_words(rew_expr, \"done\", int(done))\n\n for var in sorted(self.config[\"environment\"][\"reward\"][\"variables\"], key=lambda x: len(x), reverse=True):\n var_val = self._get_variable_value(var)\n if isinstance(var_val, list) or isinstance(var_val, np.ndarray): # TODO: needs to be better way to do this\n var_val = var_val[0]\n rew_expr = str_replace_whole_words(rew_expr, var[\"name\"], var_val)\n\n return eval(rew_expr)\n\n def _get_variable_value(self, var):\n if var[\"type\"] == \"state\":\n val = self.control_system.current_state[var[\"name\"]]\n elif var[\"type\"] == \"input\":\n if var.get(\"value_type\", \"absolute\") == \"absolute\":\n val = self.control_system.controller.current_input[var[\"name\"]]\n elif var.get(\"value_type\") == \"delta\":\n val = self.control_system.controller.history[\"inputs\"][-2][var[\"name\"]] - \\\n self.control_system.controller.current_input[var[\"name\"]]\n else:\n raise ValueError\n elif var[\"type\"] == \"reference\":\n val = self.control_system.controller.current_reference[var[\"name\"]]\n elif var[\"type\"] == \"tvp\":\n val = self.control_system.tvps[var[\"name\"]].get_values(self.steps_count)\n elif var[\"type\"] == \"error\":\n val = self.control_system.controller.history[\"errors\"][-1][var[\"name\"]]\n if np.isnan(val):\n val = 0\n elif var[\"type\"] == \"epsilon\":\n val = self.control_system.controller.history[\"epsilons\"][-1][var[\"name\"]]\n if np.isnan(val):\n val = 0\n elif var[\"type\"] == \"constraint\":\n if var.get(\"value_type\") == \"distance\":\n val = self.control_system.get_constraint_distances((var[\"name\"],))[var[\"name\"]]\n else:\n raise ValueError\n elif var[\"type\"] == \"action\":\n if var.get(\"value_type\", \"agent\") == \"agent\":\n val = self.history[\"actions\"][-1][var[\"name\"]]\n elif var.get(\"value_type\") == \"controller\":\n val = self.control_system.controller.history[var[\"name\"]][-1]\n else:\n raise ValueError\n elif var[\"type\"] == \"time\":\n if var.get(\"value_type\") == \"fraction\":\n val = self.control_system.controller.steps_since_mpc_computation / self.control_system.controller.mpc.n_horizon\n elif var.get(\"value_type\") == \"absolute\":\n val = self.control_system.controller.steps_since_mpc_computation\n else:\n raise ValueError\n elif var[\"type\"] == \"parameter\":\n if var[\"value_type\"] in [\"plant\", \"mpc\", \"lqr\"]:\n val = self.config[var[\"value_type\"]][\"model\"][\"parameters\"][var[\"name\"]]\n else:\n raise ValueError\n else:\n raise ValueError\n\n if isinstance(val, np.ndarray):\n val = val[0]\n if \"limits\" in var:\n val = np.clip(val, var[\"limits\"][0], var[\"limits\"][1])\n\n return val\n\n def sample_constraints(self):\n constraints = {}\n for c_name, c_props in self.config[\"environment\"].get(\"randomize\", {}).get(\"constraints\", {}).items():\n constraint_val = getattr(self.np_random, c_props[\"type\"])(**c_props[\"kw\"])\n if c_name.split(\"-\")[1] in [k.split(\"-\")[1] for k in constraints.keys()]:\n other_bound_type = \"u\" if c_name.split(\"-\")[2] == \"l\" else \"l\"\n other_bound_val = constraints[c_name[:-1] + other_bound_type]\n if other_bound_type == \"u\":\n constraint_val = 
min(other_bound_val - self.min_constraint_delta, constraint_val)\n else:\n constraint_val = max(other_bound_val + self.min_constraint_delta, constraint_val)\n constraints[c_name] = constraint_val\n return constraints\n\n def sample_state(self):\n state = {}\n for s_name, s_props in self.config[\"environment\"].get(\"randomize\", {}).get(\"state\", {}).items():\n state[s_name] = getattr(self.np_random, s_props[\"type\"])(**s_props[\"kw\"])\n\n return state\n\n def sample_reference(self):\n reference = {}\n for r_name, r_props in self.config[\"environment\"].get(\"randomize\", {}).get(\"reference\", {}).items():\n reference[r_name] = getattr(self.np_random, r_props[\"type\"])(**r_props[\"kw\"])\n\n return reference\n\n def sample_model(self):\n model = {}\n for s_name, s_props in self.config[\"environment\"].get(\"randomize\", {}).get(\"model\", {}).get(\"states\", {}).items():\n model[\"states\"] = {s_name: {}}\n for component_name, component_props in s_props.items():\n model[\"states\"][s_name][component_name] = \\\n {comp_v_name: getattr(self.np_random, v_prop[\"type\"])(**v_prop[\"kw\"])\n for comp_v_name, v_prop in component_props.items()}\n\n model = {dest: model for dest in self.config[\"environment\"].get(\"randomize\", {}).get(\"model\", {}).get(\"apply\", [])}\n return model\n\n def stop(self):\n pass\n\n def create_dataset(self, n_scenarios):\n dataset = []\n self.reset()\n for i in range(n_scenarios):\n process_noise = np.array([self.control_system._get_process_noise() for i in range(self.max_steps)])\n ep_dict = {\"state\": self.sample_state(), \"reference\": self.sample_reference(),\n \"constraint\": self.sample_constraints(), \"model\": self.sample_model(),\n \"process_noise\": {}, \"tvp\": {}}\n s_i = 0\n for s_name, s_props in self.config[\"plant\"][\"model\"][\"states\"].items():\n if \"W\" in s_props:\n ep_dict[\"process_noise\"][s_name] = process_noise[:, s_i]\n s_i += 1\n for tvp_name, tvp_obj in self.control_system.tvps.items():\n tvp_obj.generate_values(self.max_steps)\n ep_dict[\"tvp\"][tvp_name] = tvp_obj.values\n dataset.append(ep_dict)\n self.reset()\n\n return dataset\n\n def set_value_function(self, input_ph, output_ph, tf_session):\n self.control_system.controller.set_value_function(input_ph, output_ph, tf_session)\n self.value_function_is_set = True\n\n def set_learning_status(self, status):\n if self.value_function_is_set:\n self.control_system.controller.value_function.set_enabled(status)\n\n\nif __name__ == \"__main__\": # TODO: constraints on pendulum and end episode if constraints violated\n env = LetMPCEnv(\"configs/cart_pendulum_horizon.json\")\n env.seed(0)\n\n \"\"\"\n from tensorflow_casadi import TensorFlowEvaluator, MLP\n import tensorflow as tf\n a = tf.placeholder(shape=(None, 4), dtype=tf.float32)\n mlp = MLP(a)\n sess = tf.Session()\n val_fun = TensorFlowEvaluator([mlp.input_ph], [mlp.output], sess)\n env.set_value_function(mlp.input_ph, mlp.output, sess)\n \"\"\"\n\n import pickle\n with open(\"../../lmpc-horizon/datasets/cart_pendulum_10.pkl\", \"rb\") as f:\n test_set = pickle.load(f)\n\n rews = {}\n\n for i in range(1):\n import time\n obs = env.reset(**test_set[5])\n\n done = False\n t_before = time.process_time()\n horizon = 10\n while not done:\n t_step = time.process_time()\n if env.steps_count % 1 == 0 and False:\n horizon = 25 if horizon == 50 else 50\n obs, rew, done, info = env.step([horizon])#[np.random.randint(1, 10)])\n for rew_comp, v in info.items():\n if rew_comp.startswith(\"reward/\"):\n if rew_comp not in rews:\n 
rews[rew_comp] = []\n rews[rew_comp].append(v)\n if time.process_time() - t_step > 1:\n print(env.control_system.controller.mpc.solver_stats)\n print(env.steps_count)\n\n for k, v in rews.items():\n print(\"{}: {}\".format(k, sum(v)))\n print(\"Elapsed time {}\".format(time.process_time() - t_before))\n env.render()\n\n\n \n\n\n\n" ]
[ [ "numpy.sum", "numpy.clip", "matplotlib.pyplot.close", "numpy.isnan", "numpy.array", "numpy.finfo", "numpy.round" ] ]
vishalbelsare/estimagic
[ "afae1be3a1566056d11962c495b67e64bc4a0822" ]
[ "estimagic/tests/differentiation/test_derivatives.py" ]
[ "from functools import partial\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom pandas.testing import assert_frame_equal\nfrom scipy.optimize._numdiff import approx_derivative\n\nfrom estimagic.differentiation.derivatives import _consolidate_one_step_derivatives\nfrom estimagic.differentiation.derivatives import _convert_evaluation_data_to_frame\nfrom estimagic.differentiation.derivatives import (\n _convert_richardson_candidates_to_frame,\n)\nfrom estimagic.differentiation.derivatives import _nan_skipping_batch_evaluator\nfrom estimagic.differentiation.derivatives import _select_minimizer_along_axis\nfrom estimagic.differentiation.derivatives import first_derivative\nfrom estimagic.examples.numdiff_functions import logit_loglike\nfrom estimagic.examples.numdiff_functions import logit_loglike_gradient\nfrom estimagic.examples.numdiff_functions import logit_loglikeobs\nfrom estimagic.examples.numdiff_functions import logit_loglikeobs_jacobian\nfrom estimagic.utilities import namedtuple_from_kwargs\n\n\[email protected]\ndef binary_choice_inputs():\n fix_path = Path(__file__).resolve().parent / \"binary_choice_inputs.pickle\"\n inputs = pd.read_pickle(fix_path)\n return inputs\n\n\nmethods = [\"forward\", \"backward\", \"central\"]\n\n\[email protected](\"method\", methods)\ndef test_first_derivative_jacobian(binary_choice_inputs, method):\n fix = binary_choice_inputs\n func = partial(logit_loglikeobs, y=fix[\"y\"], x=fix[\"x\"])\n\n calculated = first_derivative(\n func=func,\n method=method,\n params=fix[\"params_np\"],\n n_steps=1,\n base_steps=None,\n lower_bounds=np.full(fix[\"params_np\"].shape, -np.inf),\n upper_bounds=np.full(fix[\"params_np\"].shape, np.inf),\n min_steps=1e-8,\n step_ratio=2.0,\n f0=func(fix[\"params_np\"]),\n n_cores=1,\n )\n\n expected = logit_loglikeobs_jacobian(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n\n aaae(calculated[\"derivative\"], expected, decimal=6)\n\n\ndef test_first_derivative_jacobian_works_at_defaults(binary_choice_inputs):\n fix = binary_choice_inputs\n func = partial(logit_loglikeobs, y=fix[\"y\"], x=fix[\"x\"])\n calculated = first_derivative(func=func, params=fix[\"params_np\"], n_cores=1)\n expected = logit_loglikeobs_jacobian(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n aaae(calculated[\"derivative\"], expected, decimal=6)\n\n\[email protected](\"method\", methods)\ndef test_first_derivative_gradient(binary_choice_inputs, method):\n fix = binary_choice_inputs\n func = partial(logit_loglike, y=fix[\"y\"], x=fix[\"x\"])\n\n calculated = first_derivative(\n func=func,\n method=method,\n params=fix[\"params_np\"],\n n_steps=1,\n f0=func(fix[\"params_np\"]),\n n_cores=1,\n )\n\n expected = logit_loglike_gradient(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n\n aaae(calculated[\"derivative\"], expected, decimal=4)\n\n\[email protected](\"method\", methods)\ndef test_first_derivative_scalar(method):\n def f(x):\n return x ** 2\n\n calculated = first_derivative(f, 3.0, n_cores=1)\n expected = 6.0\n assert calculated[\"derivative\"] == expected\n\n\[email protected](\"method\", methods)\ndef test_first_derivative_scalar_with_return_func_value(method):\n def f(x):\n return x ** 2\n\n calculated = first_derivative(\n f, 3.0, return_func_value=True, return_info=False, n_cores=1\n )\n expected = {\"derivative\": 6.0, \"func_value\": 9.0}\n assert calculated == expected\n\n\ndef test_nan_skipping_batch_evaluator():\n arglist = [np.nan, np.ones(2), 
np.array([3, 4]), np.nan, np.array([1, 2])]\n expected = [\n np.full(2, np.nan),\n np.ones(2),\n np.array([9, 16]),\n np.full(2, np.nan),\n np.array([1, 4]),\n ]\n calculated = _nan_skipping_batch_evaluator(\n func=lambda x: x ** 2,\n arguments=arglist,\n n_cores=1,\n error_handling=\"continue\",\n batch_evaluator=\"joblib\",\n )\n for arr_calc, arr_exp in zip(calculated, expected):\n if np.isnan(arr_exp).all():\n assert np.isnan(arr_calc).all()\n else:\n aaae(arr_calc, arr_exp)\n\n\ndef test_consolidate_one_step_derivatives():\n forward = np.ones((1, 4, 3))\n forward[:, :, 0] = np.nan\n backward = np.zeros_like(forward)\n\n calculated = _consolidate_one_step_derivatives(\n {\"forward\": forward, \"backward\": backward}, [\"forward\", \"backward\"]\n )\n expected = np.array([[0, 1, 1]] * 4)\n aaae(calculated, expected)\n\n\[email protected]()\ndef example_function_gradient_fixtures():\n def f(x):\n \"\"\"f:R^3 -> R\"\"\"\n x1, x2, x3 = x[0], x[1], x[2]\n y1 = np.sin(x1) + np.cos(x2) + x3 - x3\n return y1\n\n def fprime(x):\n \"\"\"Gradient(f)(x):R^3 -> R^3\"\"\"\n x1, x2, x3 = x[0], x[1], x[2]\n grad = np.array([np.cos(x1), -np.sin(x2), x3 - x3])\n return grad\n\n return {\"func\": f, \"func_prime\": fprime}\n\n\[email protected]()\ndef example_function_jacobian_fixtures():\n def f(x):\n \"\"\"f:R^3 -> R^2\"\"\"\n x1, x2, x3 = x[0], x[1], x[2]\n y1, y2 = np.sin(x1) + np.cos(x2), np.exp(x3)\n return np.array([y1, y2])\n\n def fprime(x):\n \"\"\"Jacobian(f)(x):R^3 -> R^(2x3)\"\"\"\n x1, x2, x3 = x[0], x[1], x[2]\n jac = np.array([[np.cos(x1), -np.sin(x2), 0], [0, 0, np.exp(x3)]])\n return jac\n\n return {\"func\": f, \"func_prime\": fprime}\n\n\ndef test_first_derivative_gradient_richardson(example_function_gradient_fixtures):\n f = example_function_gradient_fixtures[\"func\"]\n fprime = example_function_gradient_fixtures[\"func_prime\"]\n\n true_fprime = fprime(np.ones(3))\n scipy_fprime = approx_derivative(f, np.ones(3))\n our_fprime = first_derivative(f, np.ones(3), n_steps=3, method=\"central\", n_cores=1)\n\n aaae(scipy_fprime, our_fprime[\"derivative\"])\n aaae(true_fprime, our_fprime[\"derivative\"])\n\n\ndef test_first_derivative_jacobian_richardson(example_function_jacobian_fixtures):\n f = example_function_jacobian_fixtures[\"func\"]\n fprime = example_function_jacobian_fixtures[\"func_prime\"]\n\n true_fprime = fprime(np.ones(3))\n scipy_fprime = approx_derivative(f, np.ones(3))\n our_fprime = first_derivative(f, np.ones(3), n_steps=3, method=\"central\", n_cores=1)\n\n aaae(scipy_fprime, our_fprime[\"derivative\"])\n aaae(true_fprime, our_fprime[\"derivative\"])\n\n\ndef test_convert_evaluation_data_to_frame():\n arr = np.arange(4).reshape(2, 2)\n arr2 = arr.reshape(2, 1, 2)\n steps = namedtuple_from_kwargs(pos=arr, neg=-arr)\n evals = namedtuple_from_kwargs(pos=arr2, neg=-arr2)\n expected = [\n [1, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 1],\n [1, 1, 0, 0, 2, 2],\n [1, 1, 1, 0, 3, 3],\n [-1, 0, 0, 0, 0, 0],\n [-1, 0, 1, 0, 1, -1],\n [-1, 1, 0, 0, 2, -2],\n [-1, 1, 1, 0, 3, -3],\n ]\n expected = pd.DataFrame(\n expected, columns=[\"sign\", \"step_number\", \"dim_x\", \"dim_f\", \"step\", \"eval\"]\n )\n got = _convert_evaluation_data_to_frame(steps, evals)\n assert_frame_equal(expected, got.reset_index(), check_dtype=False)\n\n\ndef test__convert_richardson_candidates_to_frame():\n jac = {\n \"forward1\": np.array([[0, 1], [2, 3]]),\n \"forward2\": np.array([[0.5, 1], [2, 3]]),\n }\n err = {\n \"forward1\": np.array([[0, 0], [0, 1]]),\n \"forward2\": np.array([[1, 0], [0, 0]]),\n }\n 
expected = [\n [\"forward\", 1, 0, 0, 0, 0],\n [\"forward\", 1, 1, 0, 1, 0],\n [\"forward\", 1, 0, 1, 2, 0],\n [\"forward\", 1, 1, 1, 3, 1],\n [\"forward\", 2, 0, 0, 0.5, 1],\n [\"forward\", 2, 1, 0, 1, 0],\n [\"forward\", 2, 0, 1, 2, 0],\n [\"forward\", 2, 1, 1, 3, 0],\n ]\n expected = pd.DataFrame(\n expected, columns=[\"method\", \"num_term\", \"dim_x\", \"dim_f\", \"der\", \"err\"]\n )\n expected = expected.set_index([\"method\", \"num_term\", \"dim_x\", \"dim_f\"])\n got = _convert_richardson_candidates_to_frame(jac, err)\n assert_frame_equal(got, expected, check_dtype=False)\n\n\ndef test__select_minimizer_along_axis():\n der = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])\n err = np.array([[[0, 1], [1, 0]], [[1, 0], [0, 1]]])\n expected = (np.array([[0, 5], [6, 3]]), np.array([[0, 0], [0, 0]]))\n got = _select_minimizer_along_axis(der, err)\n aaae(expected, got)\n" ]
[ [ "numpy.ones", "pandas.read_pickle", "numpy.zeros_like", "pandas.DataFrame", "numpy.cos", "numpy.exp", "numpy.arange", "numpy.testing.assert_array_almost_equal", "numpy.full", "numpy.array", "numpy.sin", "pandas.testing.assert_frame_equal", "numpy.isnan" ] ]
weiwei1115/PaddleNLP
[ "dd98f7f8b25b41d39228ba8a958b11a6212709a3" ]
[ "examples/language_model/bert/run_glue.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport random\nimport time\nimport math\nimport distutils.util\nfrom functools import partial\n\nimport numpy as np\nimport paddle\nfrom paddle.io import DataLoader\nfrom paddle.metric import Metric, Accuracy, Precision, Recall\n\nfrom paddlenlp.datasets import GlueCoLA, GlueSST2, GlueMRPC, GlueSTSB, GlueQQP, GlueMNLI, GlueQNLI, GlueRTE\nfrom paddlenlp.data import Stack, Tuple, Pad\nfrom paddlenlp.transformers import BertForSequenceClassification, BertTokenizer\nfrom paddlenlp.transformers import ElectraForSequenceClassification, ElectraTokenizer\nfrom paddlenlp.transformers import ErnieForSequenceClassification, ErnieTokenizer\nfrom paddlenlp.transformers import LinearDecayWithWarmup\nfrom paddlenlp.metrics import AccuracyAndF1, Mcc, PearsonAndSpearman\n\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\nTASK_CLASSES = {\n \"cola\": (GlueCoLA, Mcc),\n \"sst-2\": (GlueSST2, Accuracy),\n \"mrpc\": (GlueMRPC, AccuracyAndF1),\n \"sts-b\": (GlueSTSB, PearsonAndSpearman),\n \"qqp\": (GlueQQP, AccuracyAndF1),\n \"mnli\": (GlueMNLI, Accuracy),\n \"qnli\": (GlueQNLI, Accuracy),\n \"rte\": (GlueRTE, Accuracy),\n}\n\nMODEL_CLASSES = {\n \"bert\": (BertForSequenceClassification, BertTokenizer),\n \"ernie\": (ErnieForSequenceClassification, ErnieTokenizer)\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train selected in the list: \" +\n \", \".join(TASK_CLASSES.keys()), )\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" +\n \", \".join(MODEL_CLASSES.keys()), )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \"\n + \", \".join(\n sum([\n list(classes[-1].pretrained_init_configuration.keys())\n for classes in MODEL_CLASSES.values()\n ], [])), )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\", )\n parser.add_argument(\n \"--learning_rate\",\n default=1e-4,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\n \"--num_train_epochs\",\n default=3,\n type=int,\n help=\"Total number of training epochs to perform.\", )\n parser.add_argument(\n \"--logging_steps\",\n type=int,\n default=100,\n help=\"Log every X updates steps.\")\n parser.add_argument(\n \"--save_steps\",\n type=int,\n default=100,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--batch_size\",\n default=32,\n type=int,\n help=\"Batch size per GPU/CPU for training.\", )\n parser.add_argument(\n \"--weight_decay\",\n default=0.0,\n type=float,\n help=\"Weight decay if we apply some.\")\n parser.add_argument(\n \"--warmup_steps\",\n default=0,\n type=int,\n help=\"Linear warmup over warmup_steps. If > 0: Override warmup_proportion\"\n )\n parser.add_argument(\n \"--warmup_proportion\",\n default=0.,\n type=float,\n help=\"Linear warmup proportion over total steps.\")\n parser.add_argument(\n \"--adam_epsilon\",\n default=1e-6,\n type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\n \"--seed\", default=42, type=int, help=\"random seed for initialization\")\n parser.add_argument(\n \"--n_cards\",\n default=1,\n type=int,\n help=\"Number cards for the training, only support multi cards in the gpu.\"\n )\n parser.add_argument(\n \"--select_device\",\n type=str,\n default=\"gpu\",\n help=\"Device for selecting for the training.\")\n parser.add_argument(\n \"--use_amp\",\n type=distutils.util.strtobool,\n default=False,\n help=\"Enable mixed precision training.\")\n parser.add_argument(\n \"--scale_loss\",\n type=float,\n default=2**15,\n help=\"The value of scale_loss for fp16.\")\n args = parser.parse_args()\n return args\n\n\ndef set_seed(args):\n # Use the same data seed(for data shuffle) for all procs to guarantee data\n # consistency after sharding.\n random.seed(args.seed)\n np.random.seed(args.seed)\n # Maybe different op seeds(for dropout) for different procs is better. 
By:\n # `paddle.seed(args.seed + paddle.distributed.get_rank())`\n paddle.seed(args.seed)\n\n\ndef evaluate(model, loss_fct, metric, data_loader):\n model.eval()\n metric.reset()\n for batch in data_loader:\n input_ids, segment_ids, labels = batch\n logits = model(input_ids, segment_ids)\n loss = loss_fct(logits, labels)\n correct = metric.compute(logits, labels)\n metric.update(correct)\n res = metric.accumulate()\n if isinstance(metric, AccuracyAndF1):\n logger.info(\n \"eval loss: %f, acc: %s, precision: %s, recall: %s, f1: %s, acc and f1: %s.\"\n % (loss.numpy(), res[0], res[1], res[2], res[3], res[4]))\n elif isinstance(metric, Mcc):\n logger.info(\"eval loss: %f, mcc: %s.\" % (loss.numpy(), res[0]))\n elif isinstance(metric, PearsonAndSpearman):\n logger.info(\n \"eval loss: %f, pearson: %s, spearman: %s, pearson and spearman: %s.\"\n % (loss.numpy(), res[0], res[1], res[2]))\n else:\n logger.info(\"eval loss: %f, acc: %s.\" % (loss.numpy(), res))\n model.train()\n\n\ndef convert_example(example,\n tokenizer,\n label_list,\n max_seq_length=512,\n is_test=False):\n \"\"\"convert a glue example into necessary features\"\"\"\n\n def _truncate_seqs(seqs, max_seq_length):\n if len(seqs) == 1: # single sentence\n # Account for [CLS] and [SEP] with \"- 2\"\n seqs[0] = seqs[0][0:(max_seq_length - 2)]\n else: # Sentence pair\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n tokens_a, tokens_b = seqs\n max_seq_length -= 3\n while True: # Truncate with longest_first strategy\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_seq_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n return seqs\n\n def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):\n concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])\n segment_ids = sum(\n ([i] * (len(seq) + len(sep))\n for i, (sep, seq) in enumerate(zip(separators, seqs))), [])\n if isinstance(seq_mask, int):\n seq_mask = [[seq_mask] * len(seq) for seq in seqs]\n if isinstance(separator_mask, int):\n separator_mask = [[separator_mask] * len(sep) for sep in separators]\n p_mask = sum((s_mask + mask\n for sep, seq, s_mask, mask in zip(\n separators, seqs, seq_mask, separator_mask)), [])\n return concat, segment_ids, p_mask\n\n if not is_test:\n # `label_list == None` is for regression task\n label_dtype = \"int64\" if label_list else \"float32\"\n # Get the label\n label = example[-1]\n example = example[:-1]\n # Create label maps if classification task\n if label_list:\n label_map = {}\n for (i, l) in enumerate(label_list):\n label_map[l] = i\n label = label_map[label]\n label = np.array([label], dtype=label_dtype)\n\n # Tokenize raw text\n tokens_raw = [tokenizer(l) for l in example]\n # Truncate to the truncate_length,\n tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)\n # Concate the sequences with special tokens\n tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]\n tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *\n len(tokens_trun))\n # Convert the token to ids\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n valid_length = len(input_ids)\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n # input_mask = [1] * len(input_ids)\n if not is_test:\n return input_ids, segment_ids, valid_length, label\n else:\n return input_ids, segment_ids, valid_length\n\n\ndef do_train(args):\n paddle.set_device(args.select_device)\n if paddle.distributed.get_world_size() > 1:\n paddle.distributed.init_parallel_env()\n\n set_seed(args)\n\n args.task_name = args.task_name.lower()\n dataset_class, metric_class = TASK_CLASSES[args.task_name]\n args.model_type = args.model_type.lower()\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n\n train_dataset = dataset_class.get_datasets([\"train\"])\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n\n trans_func = partial(\n convert_example,\n tokenizer=tokenizer,\n label_list=train_dataset.get_labels(),\n max_seq_length=args.max_seq_length)\n train_dataset = train_dataset.apply(trans_func, lazy=True)\n train_batch_sampler = paddle.io.DistributedBatchSampler(\n train_dataset, batch_size=args.batch_size, shuffle=True)\n batchify_fn = lambda samples, fn=Tuple(\n Pad(axis=0, pad_val=tokenizer.pad_token_id), # input\n Pad(axis=0, pad_val=tokenizer.pad_token_id), # segment\n Stack(), # length\n Stack(dtype=\"int64\" if train_dataset.get_labels() else \"float32\") # label\n ): [data for i, data in enumerate(fn(samples)) if i != 2]\n train_data_loader = DataLoader(\n dataset=train_dataset,\n batch_sampler=train_batch_sampler,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n if args.task_name == \"mnli\":\n dev_dataset_matched, dev_dataset_mismatched = dataset_class.get_datasets(\n [\"dev_matched\", \"dev_mismatched\"])\n dev_dataset_matched = dev_dataset_matched.apply(trans_func, lazy=True)\n dev_dataset_mismatched = dev_dataset_mismatched.apply(\n trans_func, lazy=True)\n dev_batch_sampler_matched = paddle.io.BatchSampler(\n dev_dataset_matched, batch_size=args.batch_size, shuffle=False)\n dev_data_loader_matched = DataLoader(\n dataset=dev_dataset_matched,\n batch_sampler=dev_batch_sampler_matched,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n dev_batch_sampler_mismatched = paddle.io.BatchSampler(\n dev_dataset_mismatched, batch_size=args.batch_size, shuffle=False)\n dev_data_loader_mismatched = DataLoader(\n dataset=dev_dataset_mismatched,\n batch_sampler=dev_batch_sampler_mismatched,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n else:\n dev_dataset = dataset_class.get_datasets([\"dev\"])\n dev_dataset = dev_dataset.apply(trans_func, lazy=True)\n dev_batch_sampler = paddle.io.BatchSampler(\n dev_dataset, batch_size=args.batch_size, shuffle=False)\n dev_data_loader = DataLoader(\n dataset=dev_dataset,\n batch_sampler=dev_batch_sampler,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n\n num_classes = 1 if train_dataset.get_labels() == None else len(\n train_dataset.get_labels())\n model = model_class.from_pretrained(\n args.model_name_or_path, num_classes=num_classes)\n if paddle.distributed.get_world_size() > 1:\n model = paddle.DataParallel(model)\n\n num_training_steps = args.max_steps if args.max_steps > 0 else (\n len(train_data_loader) * args.num_train_epochs)\n warmup = args.warmup_steps if args.warmup_steps > 0 else args.warmup_proportion\n\n lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,\n warmup)\n\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n beta1=0.9,\n beta2=0.999,\n epsilon=args.adam_epsilon,\n parameters=model.parameters(),\n 
weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ])\n\n loss_fct = paddle.nn.loss.CrossEntropyLoss() if train_dataset.get_labels(\n ) else paddle.nn.loss.MSELoss()\n\n metric = metric_class()\n\n if args.use_amp:\n scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)\n\n global_step = 0\n tic_train = time.time()\n for epoch in range(args.num_train_epochs):\n for step, batch in enumerate(train_data_loader):\n global_step += 1\n input_ids, segment_ids, labels = batch\n with paddle.amp.auto_cast(\n args.use_amp,\n custom_white_list=[\"layer_norm\", \"softmax\", \"gelu\"]):\n logits = model(input_ids, segment_ids)\n loss = loss_fct(logits, labels)\n if args.use_amp:\n scaler.scale(loss).backward()\n scaler.minimize(optimizer, loss)\n else:\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n optimizer.clear_gradients()\n if global_step % args.logging_steps == 0:\n logger.info(\n \"global step %d/%d, epoch: %d, batch: %d, rank_id: %s, loss: %f, lr: %.10f, speed: %.4f step/s\"\n % (global_step, num_training_steps, epoch, step,\n paddle.distributed.get_rank(), loss, optimizer.get_lr(),\n args.logging_steps / (time.time() - tic_train)))\n tic_train = time.time()\n if global_step % args.save_steps == 0:\n tic_eval = time.time()\n if args.task_name == \"mnli\":\n evaluate(model, loss_fct, metric, dev_data_loader_matched)\n evaluate(model, loss_fct, metric,\n dev_data_loader_mismatched)\n logger.info(\"eval done total : %s s\" %\n (time.time() - tic_eval))\n else:\n evaluate(model, loss_fct, metric, dev_data_loader)\n logger.info(\"eval done total : %s s\" %\n (time.time() - tic_eval))\n if (not args.n_cards > 1) or paddle.distributed.get_rank() == 0:\n output_dir = os.path.join(args.output_dir,\n \"%s_ft_model_%d.pdparams\" %\n (args.task_name, global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # Need better way to get inner model of DataParallel\n model_to_save = model._layers if isinstance(\n model, paddle.DataParallel) else model\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n\ndef print_arguments(args):\n \"\"\"print arguments\"\"\"\n print('----------- Configuration Arguments -----------')\n for arg, value in sorted(vars(args).items()):\n print('%s: %s' % (arg, value))\n print('------------------------------------------------')\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n print_arguments(args)\n if args.n_cards > 1 and args.select_device == \"gpu\":\n paddle.distributed.spawn(do_train, args=(args, ), nprocs=args.n_cards)\n else:\n do_train(args)\n" ]
[ [ "numpy.array", "numpy.random.seed" ] ]
star10919/drf
[ "77c005794087484d72ffc0d76612a6ac9845821e", "77c005794087484d72ffc0d76612a6ac9845821e" ]
[ "venv/Lib/site-packages/sklearn/linear_model/_base.py", "venv/Lib/site-packages/sklearn/inspection/tests/test_partial_dependence.py" ]
[ "\"\"\"\nGeneralized Linear Models.\n\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Olivier Grisel <[email protected]>\n# Vincent Michel <[email protected]>\n# Peter Prettenhofer <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Lars Buitinck\n# Maryan Morel <[email protected]>\n# Giorgio Patrini <[email protected]>\n# License: BSD 3 clause\n\nfrom abc import ABCMeta, abstractmethod\nimport numbers\nimport warnings\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy import linalg\nfrom scipy import optimize\nfrom scipy import sparse\nfrom scipy.special import expit\nfrom joblib import Parallel\n\nfrom ..base import (BaseEstimator, ClassifierMixin, RegressorMixin,\n MultiOutputMixin)\nfrom ..utils import check_array\nfrom ..utils.validation import FLOAT_DTYPES\nfrom ..utils.validation import _deprecate_positional_args\nfrom ..utils import check_random_state\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale\nfrom ..utils.fixes import sparse_lsqr\nfrom ..utils._seq_dataset import ArrayDataset32, CSRDataset32\nfrom ..utils._seq_dataset import ArrayDataset64, CSRDataset64\nfrom ..utils.validation import check_is_fitted, _check_sample_weight\nfrom ..utils.fixes import delayed\nfrom ..preprocessing import normalize as f_normalize\n\n# TODO: bayesian_ridge_regression and bayesian_regression_ard\n# should be squashed into its respective objects.\n\nSPARSE_INTERCEPT_DECAY = 0.01\n# For sparse data intercept updates are scaled by this decay factor to avoid\n# intercept oscillation.\n\n\ndef make_dataset(X, y, sample_weight, random_state=None):\n \"\"\"Create ``Dataset`` abstraction for sparse and dense inputs.\n\n This also returns the ``intercept_decay`` which is different\n for sparse datasets.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data\n\n y : array-like, shape (n_samples, )\n Target values.\n\n sample_weight : numpy array of shape (n_samples,)\n The weight of each sample\n\n random_state : int, RandomState instance or None (default)\n Determines random number generation for dataset shuffling and noise.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n dataset\n The ``Dataset`` abstraction\n intercept_decay\n The intercept decay\n \"\"\"\n\n rng = check_random_state(random_state)\n # seed should never be 0 in SequentialDataset64\n seed = rng.randint(1, np.iinfo(np.int32).max)\n\n if X.dtype == np.float32:\n CSRData = CSRDataset32\n ArrayData = ArrayDataset32\n else:\n CSRData = CSRDataset64\n ArrayData = ArrayDataset64\n\n if sp.issparse(X):\n dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight,\n seed=seed)\n intercept_decay = SPARSE_INTERCEPT_DECAY\n else:\n X = np.ascontiguousarray(X)\n dataset = ArrayData(X, y, sample_weight, seed=seed)\n intercept_decay = 1.0\n\n return dataset, intercept_decay\n\n\ndef _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,\n sample_weight=None, return_mean=False, check_input=True):\n \"\"\"Center and scale data.\n\n Centers data to have mean zero along axis 0. If fit_intercept=False or if\n the X is a sparse matrix, no centering is done, but normalization can still\n be applied. 
The function returns the statistics necessary to reconstruct\n the input data, which are X_offset, y_offset, X_scale, such that the output\n\n X = (X - X_offset) / X_scale\n\n X_scale is the L2 norm of X - X_offset. If sample_weight is not None,\n then the weighted mean of X and y is zero, and not the mean itself. If\n return_mean=True, the mean, eventually weighted, is returned, independently\n of whether X was centered (option used for optimization with sparse data in\n coordinate_descend).\n\n This is here because nearly all linear models will want their data to be\n centered. This function also systematically makes y consistent with X.dtype\n \"\"\"\n if isinstance(sample_weight, numbers.Number):\n sample_weight = None\n if sample_weight is not None:\n sample_weight = np.asarray(sample_weight)\n\n if check_input:\n X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],\n dtype=FLOAT_DTYPES)\n elif copy:\n if sp.issparse(X):\n X = X.copy()\n else:\n X = X.copy(order='K')\n\n y = np.asarray(y, dtype=X.dtype)\n\n if fit_intercept:\n if sp.issparse(X):\n X_offset, X_var = mean_variance_axis(X, axis=0)\n if not return_mean:\n X_offset[:] = X.dtype.type(0)\n\n if normalize:\n\n # TODO: f_normalize could be used here as well but the function\n # inplace_csr_row_normalize_l2 must be changed such that it\n # can return also the norms computed internally\n\n # transform variance to norm in-place\n X_var *= X.shape[0]\n X_scale = np.sqrt(X_var, X_var)\n del X_var\n X_scale[X_scale == 0] = 1\n inplace_column_scale(X, 1. / X_scale)\n else:\n X_scale = np.ones(X.shape[1], dtype=X.dtype)\n\n else:\n X_offset = np.average(X, axis=0, weights=sample_weight)\n X -= X_offset\n if normalize:\n X, X_scale = f_normalize(X, axis=0, copy=False,\n return_norm=True)\n else:\n X_scale = np.ones(X.shape[1], dtype=X.dtype)\n y_offset = np.average(y, axis=0, weights=sample_weight)\n y = y - y_offset\n else:\n X_offset = np.zeros(X.shape[1], dtype=X.dtype)\n X_scale = np.ones(X.shape[1], dtype=X.dtype)\n if y.ndim == 1:\n y_offset = X.dtype.type(0)\n else:\n y_offset = np.zeros(y.shape[1], dtype=X.dtype)\n\n return X, y, X_offset, y_offset, X_scale\n\n\n# TODO: _rescale_data should be factored into _preprocess_data.\n# Currently, the fact that sag implements its own way to deal with\n# sample_weight makes the refactoring tricky.\n\ndef _rescale_data(X, y, sample_weight):\n \"\"\"Rescale data sample-wise by square root of sample_weight.\n\n For many linear models, this enables easy support for sample_weight.\n\n Returns\n -------\n X_rescaled : {array-like, sparse matrix}\n\n y_rescaled : {array-like, sparse matrix}\n \"\"\"\n n_samples = X.shape[0]\n sample_weight = np.asarray(sample_weight)\n if sample_weight.ndim == 0:\n sample_weight = np.full(n_samples, sample_weight,\n dtype=sample_weight.dtype)\n sample_weight = np.sqrt(sample_weight)\n sw_matrix = sparse.dia_matrix((sample_weight, 0),\n shape=(n_samples, n_samples))\n X = safe_sparse_dot(sw_matrix, X)\n y = safe_sparse_dot(sw_matrix, y)\n return X, y\n\n\nclass LinearModel(BaseEstimator, metaclass=ABCMeta):\n \"\"\"Base class for Linear Models\"\"\"\n\n @abstractmethod\n def fit(self, X, y):\n \"\"\"Fit model.\"\"\"\n\n def _decision_function(self, X):\n check_is_fitted(self)\n\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])\n return safe_sparse_dot(X, self.coef_.T,\n dense_output=True) + self.intercept_\n\n def predict(self, X):\n \"\"\"\n Predict using the linear model.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape 
(n_samples, n_features)\n Samples.\n\n Returns\n -------\n C : array, shape (n_samples,)\n Returns predicted values.\n \"\"\"\n return self._decision_function(X)\n\n _preprocess_data = staticmethod(_preprocess_data)\n\n def _set_intercept(self, X_offset, y_offset, X_scale):\n \"\"\"Set the intercept_\n \"\"\"\n if self.fit_intercept:\n self.coef_ = self.coef_ / X_scale\n self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)\n else:\n self.intercept_ = 0.\n\n def _more_tags(self):\n return {'requires_y': True}\n\n\n# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.\n# Maybe the n_features checking can be moved to LinearModel.\nclass LinearClassifierMixin(ClassifierMixin):\n \"\"\"Mixin for linear classifiers.\n\n Handles prediction for sparse and dense X.\n \"\"\"\n\n def decision_function(self, X):\n \"\"\"\n Predict confidence scores for samples.\n\n The confidence score for a sample is proportional to the signed\n distance of that sample to the hyperplane.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Samples.\n\n Returns\n -------\n array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)\n Confidence scores per (sample, class) combination. In the binary\n case, confidence score for self.classes_[1] where >0 means this\n class would be predicted.\n \"\"\"\n check_is_fitted(self)\n\n X = check_array(X, accept_sparse='csr')\n\n n_features = self.coef_.shape[1]\n if X.shape[1] != n_features:\n raise ValueError(\"X has %d features per sample; expecting %d\"\n % (X.shape[1], n_features))\n\n scores = safe_sparse_dot(X, self.coef_.T,\n dense_output=True) + self.intercept_\n return scores.ravel() if scores.shape[1] == 1 else scores\n\n def predict(self, X):\n \"\"\"\n Predict class labels for samples in X.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Samples.\n\n Returns\n -------\n C : array, shape [n_samples]\n Predicted class label per sample.\n \"\"\"\n scores = self.decision_function(X)\n if len(scores.shape) == 1:\n indices = (scores > 0).astype(int)\n else:\n indices = scores.argmax(axis=1)\n return self.classes_[indices]\n\n def _predict_proba_lr(self, X):\n \"\"\"Probability estimation for OvR logistic regression.\n\n Positive class probabilities are computed as\n 1. / (1. + np.exp(-self.decision_function(X)));\n multiclass is handled by normalizing that over all classes.\n \"\"\"\n prob = self.decision_function(X)\n expit(prob, out=prob)\n if prob.ndim == 1:\n return np.vstack([1 - prob, prob]).T\n else:\n # OvR normalization, like LibLinear's predict_probability\n prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))\n return prob\n\n\nclass SparseCoefMixin:\n \"\"\"Mixin for converting coef_ to and from CSR format.\n\n L1-regularizing estimators should inherit this.\n \"\"\"\n\n def densify(self):\n \"\"\"\n Convert coefficient matrix to dense array format.\n\n Converts the ``coef_`` member (back) to a numpy.ndarray. 
This is the\n default format of ``coef_`` and is required for fitting, so calling\n this method is only required on models that have previously been\n sparsified; otherwise, it is a no-op.\n\n Returns\n -------\n self\n Fitted estimator.\n \"\"\"\n msg = \"Estimator, %(name)s, must be fitted before densifying.\"\n check_is_fitted(self, msg=msg)\n if sp.issparse(self.coef_):\n self.coef_ = self.coef_.toarray()\n return self\n\n def sparsify(self):\n \"\"\"\n Convert coefficient matrix to sparse format.\n\n Converts the ``coef_`` member to a scipy.sparse matrix, which for\n L1-regularized models can be much more memory- and storage-efficient\n than the usual numpy.ndarray representation.\n\n The ``intercept_`` member is not converted.\n\n Returns\n -------\n self\n Fitted estimator.\n\n Notes\n -----\n For non-sparse models, i.e. when there are not many zeros in ``coef_``,\n this may actually *increase* memory usage, so use this method with\n care. A rule of thumb is that the number of zero elements, which can\n be computed with ``(coef_ == 0).sum()``, must be more than 50% for this\n to provide significant benefits.\n\n After calling this method, further fitting with the partial_fit\n method (if any) will not work until you call densify.\n \"\"\"\n msg = \"Estimator, %(name)s, must be fitted before sparsifying.\"\n check_is_fitted(self, msg=msg)\n self.coef_ = sp.csr_matrix(self.coef_)\n return self\n\n\nclass LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):\n \"\"\"\n Ordinary least squares Linear Regression.\n\n LinearRegression fits a linear model with coefficients w = (w1, ..., wp)\n to minimize the residual sum of squares between the observed targets in\n the dataset, and the targets predicted by the linear approximation.\n\n Parameters\n ----------\n fit_intercept : bool, default=True\n Whether to calculate the intercept for this model. If set\n to False, no intercept will be used in calculations\n (i.e. data is expected to be centered).\n\n normalize : bool, default=False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : bool, default=True\n If True, X will be copied; else, it may be overwritten.\n\n n_jobs : int, default=None\n The number of jobs to use for the computation. This will only provide\n speedup for n_targets > 1 and sufficient large problems.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n positive : bool, default=False\n When set to ``True``, forces the coefficients to be positive. This\n option is only supported for dense arrays.\n\n .. versionadded:: 0.24\n\n Attributes\n ----------\n coef_ : array of shape (n_features, ) or (n_targets, n_features)\n Estimated coefficients for the linear regression problem.\n If multiple targets are passed during the fit (y 2D), this\n is a 2D array of shape (n_targets, n_features), while if only\n one target is passed, this is a 1D array of length n_features.\n\n rank_ : int\n Rank of matrix `X`. Only available when `X` is dense.\n\n singular_ : array of shape (min(X, y),)\n Singular values of `X`. 
Only available when `X` is dense.\n\n intercept_ : float or array of shape (n_targets,)\n Independent term in the linear model. Set to 0.0 if\n `fit_intercept = False`.\n\n See Also\n --------\n Ridge : Ridge regression addresses some of the\n problems of Ordinary Least Squares by imposing a penalty on the\n size of the coefficients with l2 regularization.\n Lasso : The Lasso is a linear model that estimates\n sparse coefficients with l1 regularization.\n ElasticNet : Elastic-Net is a linear regression\n model trained with both l1 and l2 -norm regularization of the\n coefficients.\n\n Notes\n -----\n From the implementation point of view, this is just plain Ordinary\n Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares\n (scipy.optimize.nnls) wrapped as a predictor object.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.linear_model import LinearRegression\n >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n >>> # y = 1 * x_0 + 2 * x_1 + 3\n >>> y = np.dot(X, np.array([1, 2])) + 3\n >>> reg = LinearRegression().fit(X, y)\n >>> reg.score(X, y)\n 1.0\n >>> reg.coef_\n array([1., 2.])\n >>> reg.intercept_\n 3.0...\n >>> reg.predict(np.array([[3, 5]]))\n array([16.])\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, *, fit_intercept=True, normalize=False, copy_X=True,\n n_jobs=None, positive=False):\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.n_jobs = n_jobs\n self.positive = positive\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"\n Fit linear model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values. Will be cast to X's dtype if necessary\n\n sample_weight : array-like of shape (n_samples,), default=None\n Individual weights for each sample\n\n .. 
versionadded:: 0.17\n parameter *sample_weight* support to LinearRegression.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n\n n_jobs_ = self.n_jobs\n\n accept_sparse = False if self.positive else ['csr', 'csc', 'coo']\n\n X, y = self._validate_data(X, y, accept_sparse=accept_sparse,\n y_numeric=True, multi_output=True)\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X,\n dtype=X.dtype)\n\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,\n copy=self.copy_X, sample_weight=sample_weight,\n return_mean=True)\n\n if sample_weight is not None:\n # Sample weight can be implemented via a simple rescaling.\n X, y = _rescale_data(X, y, sample_weight)\n\n if self.positive:\n if y.ndim < 2:\n self.coef_, self._residues = optimize.nnls(X, y)\n else:\n # scipy.optimize.nnls cannot handle y with shape (M, K)\n outs = Parallel(n_jobs=n_jobs_)(\n delayed(optimize.nnls)(X, y[:, j])\n for j in range(y.shape[1]))\n self.coef_, self._residues = map(np.vstack, zip(*outs))\n elif sp.issparse(X):\n X_offset_scale = X_offset / X_scale\n\n def matvec(b):\n return X.dot(b) - b.dot(X_offset_scale)\n\n def rmatvec(b):\n return X.T.dot(b) - X_offset_scale * np.sum(b)\n\n X_centered = sparse.linalg.LinearOperator(shape=X.shape,\n matvec=matvec,\n rmatvec=rmatvec)\n\n if y.ndim < 2:\n out = sparse_lsqr(X_centered, y)\n self.coef_ = out[0]\n self._residues = out[3]\n else:\n # sparse_lstsq cannot handle y with shape (M, K)\n outs = Parallel(n_jobs=n_jobs_)(\n delayed(sparse_lsqr)(X_centered, y[:, j].ravel())\n for j in range(y.shape[1]))\n self.coef_ = np.vstack([out[0] for out in outs])\n self._residues = np.vstack([out[3] for out in outs])\n else:\n self.coef_, self._residues, self.rank_, self.singular_ = \\\n linalg.lstsq(X, y)\n self.coef_ = self.coef_.T\n\n if y.ndim == 1:\n self.coef_ = np.ravel(self.coef_)\n self._set_intercept(X_offset, y_offset, X_scale)\n return self\n\n\ndef _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy,\n check_input=True, sample_weight=None):\n \"\"\"Aux function used at beginning of fit in linear models\n\n Parameters\n ----------\n order : 'F', 'C' or None, default=None\n Whether X and y will be forced to be fortran or c-style. 
Only relevant\n if sample_weight is not None.\n \"\"\"\n n_samples, n_features = X.shape\n\n if sparse.isspmatrix(X):\n # copy is not needed here as X is not modified inplace when X is sparse\n precompute = False\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X, y, fit_intercept=fit_intercept, normalize=normalize,\n copy=False, return_mean=True, check_input=check_input)\n else:\n # copy was done in fit if necessary\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy,\n check_input=check_input, sample_weight=sample_weight)\n if sample_weight is not None:\n X, y = _rescale_data(X, y, sample_weight=sample_weight)\n if hasattr(precompute, '__array__') and (\n fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or\n normalize and not np.allclose(X_scale, np.ones(n_features))):\n warnings.warn(\"Gram matrix was provided but X was centered\"\n \" to fit intercept, \"\n \"or X was normalized : recomputing Gram matrix.\",\n UserWarning)\n # recompute Gram\n precompute = 'auto'\n Xy = None\n\n # precompute if n_samples > n_features\n if isinstance(precompute, str) and precompute == 'auto':\n precompute = (n_samples > n_features)\n\n if precompute is True:\n # make sure that the 'precompute' array is contiguous.\n precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,\n order='C')\n np.dot(X.T, X, out=precompute)\n\n if not hasattr(precompute, '__array__'):\n Xy = None # cannot use Xy if precompute is not Gram\n\n if hasattr(precompute, '__array__') and Xy is None:\n common_dtype = np.find_common_type([X.dtype, y.dtype], [])\n if y.ndim == 1:\n # Xy is 1d, make sure it is contiguous.\n Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')\n np.dot(X.T, y, out=Xy)\n else:\n # Make sure that Xy is always F contiguous even if X or y are not\n # contiguous: the goal is to make it fast to extract the data for a\n # specific target.\n n_targets = y.shape[1]\n Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,\n order='F')\n np.dot(y.T, X, out=Xy.T)\n\n return X, y, X_offset, y_offset, X_scale, precompute, Xy\n", "\"\"\"\nTesting for the partial dependence module.\n\"\"\"\n\nimport numpy as np\nimport pytest\n\nimport sklearn\nfrom sklearn.inspection import partial_dependence\nfrom sklearn.inspection._partial_dependence import (\n _grid_from_X,\n _partial_dependence_brute,\n _partial_dependence_recursion\n)\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.experimental import enable_hist_gradient_boosting # noqa\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.ensemble import HistGradientBoostingRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import MultiTaskLasso\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.datasets import load_iris\nfrom sklearn.datasets import make_classification, make_regression\nfrom sklearn.cluster import KMeans\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.metrics import r2_score\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import scale\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.dummy import DummyClassifier\nfrom 
sklearn.base import BaseEstimator, ClassifierMixin, clone\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils import _IS_32BIT\nfrom sklearn.utils.validation import check_random_state\nfrom sklearn.tree.tests.test_tree import assert_is_subtree\n\n\n# toy sample\nX = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]\ny = [-1, -1, -1, 1, 1, 1]\n\n\n# (X, y), n_targets <-- as expected in the output of partial_dep()\nbinary_classification_data = (make_classification(n_samples=50,\n random_state=0), 1)\nmulticlass_classification_data = (make_classification(n_samples=50,\n n_classes=3,\n n_clusters_per_class=1,\n random_state=0), 3)\nregression_data = (make_regression(n_samples=50, random_state=0), 1)\nmultioutput_regression_data = (make_regression(n_samples=50, n_targets=2,\n random_state=0), 2)\n\n# iris\niris = load_iris()\n\n\[email protected](\"ignore:A Bunch will be returned\")\[email protected]('Estimator, method, data', [\n (GradientBoostingClassifier, 'auto', binary_classification_data),\n (GradientBoostingClassifier, 'auto', multiclass_classification_data),\n (GradientBoostingClassifier, 'brute', binary_classification_data),\n (GradientBoostingClassifier, 'brute', multiclass_classification_data),\n (GradientBoostingRegressor, 'auto', regression_data),\n (GradientBoostingRegressor, 'brute', regression_data),\n (DecisionTreeRegressor, 'brute', regression_data),\n (LinearRegression, 'brute', regression_data),\n (LinearRegression, 'brute', multioutput_regression_data),\n (LogisticRegression, 'brute', binary_classification_data),\n (LogisticRegression, 'brute', multiclass_classification_data),\n (MultiTaskLasso, 'brute', multioutput_regression_data),\n ])\[email protected]('grid_resolution', (5, 10))\[email protected]('features', ([1], [1, 2]))\[email protected]('kind', ('legacy', 'average', 'individual', 'both'))\ndef test_output_shape(Estimator, method, data, grid_resolution,\n features, kind):\n # Check that partial_dependence has consistent output shape for different\n # kinds of estimators:\n # - classifiers with binary and multiclass settings\n # - regressors\n # - multi-task regressors\n\n est = Estimator()\n\n # n_target corresponds to the number of classes (1 for binary classif) or\n # the number of tasks / outputs in multi task settings. 
It's equal to 1 for\n # classical regression_data.\n (X, y), n_targets = data\n n_instances = X.shape[0]\n\n est.fit(X, y)\n result = partial_dependence(\n est, X=X, features=features, method=method, kind=kind,\n grid_resolution=grid_resolution\n )\n # FIXME: Remove 'legacy' support in 1.1\n pdp, axes = result if kind == 'legacy' else (result, result[\"values\"])\n\n expected_pdp_shape = (n_targets,\n *[grid_resolution for _ in range(len(features))])\n expected_ice_shape = (n_targets, n_instances,\n *[grid_resolution for _ in range(len(features))])\n if kind == 'legacy':\n assert pdp.shape == expected_pdp_shape\n elif kind == 'average':\n assert pdp.average.shape == expected_pdp_shape\n elif kind == 'individual':\n assert pdp.individual.shape == expected_ice_shape\n else: # 'both'\n assert pdp.average.shape == expected_pdp_shape\n assert pdp.individual.shape == expected_ice_shape\n\n expected_axes_shape = (len(features), grid_resolution)\n assert axes is not None\n assert np.asarray(axes).shape == expected_axes_shape\n\n\ndef test_grid_from_X():\n # tests for _grid_from_X: sanity check for output, and for shapes.\n\n # Make sure that the grid is a cartesian product of the input (it will use\n # the unique values instead of the percentiles)\n percentiles = (.05, .95)\n grid_resolution = 100\n X = np.asarray([[1, 2],\n [3, 4]])\n grid, axes = _grid_from_X(X, percentiles, grid_resolution)\n assert_array_equal(grid, [[1, 2],\n [1, 4],\n [3, 2],\n [3, 4]])\n assert_array_equal(axes, X.T)\n\n # test shapes of returned objects depending on the number of unique values\n # for a feature.\n rng = np.random.RandomState(0)\n grid_resolution = 15\n\n # n_unique_values > grid_resolution\n X = rng.normal(size=(20, 2))\n grid, axes = _grid_from_X(X, percentiles, grid_resolution=grid_resolution)\n assert grid.shape == (grid_resolution * grid_resolution, X.shape[1])\n assert np.asarray(axes).shape == (2, grid_resolution)\n\n # n_unique_values < grid_resolution, will use actual values\n n_unique_values = 12\n X[n_unique_values - 1:, 0] = 12345\n rng.shuffle(X) # just to make sure the order is irrelevant\n grid, axes = _grid_from_X(X, percentiles, grid_resolution=grid_resolution)\n assert grid.shape == (n_unique_values * grid_resolution, X.shape[1])\n # axes is a list of arrays of different shapes\n assert axes[0].shape == (n_unique_values,)\n assert axes[1].shape == (grid_resolution,)\n\n\[email protected](\n \"grid_resolution, percentiles, err_msg\",\n [(2, (0, 0.0001), \"percentiles are too close\"),\n (100, (1, 2, 3, 4), \"'percentiles' must be a sequence of 2 elements\"),\n (100, 12345, \"'percentiles' must be a sequence of 2 elements\"),\n (100, (-1, .95), r\"'percentiles' values must be in \\[0, 1\\]\"),\n (100, (.05, 2), r\"'percentiles' values must be in \\[0, 1\\]\"),\n (100, (.9, .1), r\"percentiles\\[0\\] must be strictly less than\"),\n (1, (0.05, 0.95), \"'grid_resolution' must be strictly greater than 1\")]\n)\ndef test_grid_from_X_error(grid_resolution, percentiles, err_msg):\n X = np.asarray([[1, 2], [3, 4]])\n with pytest.raises(ValueError, match=err_msg):\n _grid_from_X(\n X, grid_resolution=grid_resolution, percentiles=percentiles\n )\n\n\[email protected]('target_feature', range(5))\[email protected]('est, method', [\n (LinearRegression(), 'brute'),\n (GradientBoostingRegressor(random_state=0), 'brute'),\n (GradientBoostingRegressor(random_state=0), 'recursion'),\n (HistGradientBoostingRegressor(random_state=0), 'brute'),\n (HistGradientBoostingRegressor(random_state=0), 
'recursion')]\n)\ndef test_partial_dependence_helpers(est, method, target_feature):\n # Check that what is returned by _partial_dependence_brute or\n # _partial_dependence_recursion is equivalent to manually setting a target\n # feature to a given value, and computing the average prediction over all\n # samples.\n # This also checks that the brute and recursion methods give the same\n # output.\n # Note that even on the trainset, the brute and the recursion methods\n # aren't always strictly equivalent, in particular when the slow method\n # generates unrealistic samples that have low mass in the joint\n # distribution of the input features, and when some of the features are\n # dependent. Hence the high tolerance on the checks.\n\n X, y = make_regression(random_state=0, n_features=5, n_informative=5)\n # The 'init' estimator for GBDT (here the average prediction) isn't taken\n # into account with the recursion method, for technical reasons. We set\n # the mean to 0 to that this 'bug' doesn't have any effect.\n y = y - y.mean()\n est.fit(X, y)\n\n # target feature will be set to .5 and then to 123\n features = np.array([target_feature], dtype=np.int32)\n grid = np.array([[.5],\n [123]])\n\n if method == 'brute':\n pdp, predictions = _partial_dependence_brute(est, grid, features, X,\n response_method='auto')\n else:\n pdp = _partial_dependence_recursion(est, grid, features)\n\n mean_predictions = []\n for val in (.5, 123):\n X_ = X.copy()\n X_[:, target_feature] = val\n mean_predictions.append(est.predict(X_).mean())\n\n pdp = pdp[0] # (shape is (1, 2) so make it (2,))\n\n # allow for greater margin for error with recursion method\n rtol = 1e-1 if method == 'recursion' else 1e-3\n assert np.allclose(pdp, mean_predictions, rtol=rtol)\n\n\[email protected]('seed', range(1))\ndef test_recursion_decision_tree_vs_forest_and_gbdt(seed):\n # Make sure that the recursion method gives the same results on a\n # DecisionTreeRegressor and a GradientBoostingRegressor or a\n # RandomForestRegressor with 1 tree and equivalent parameters.\n\n rng = np.random.RandomState(seed)\n\n # Purely random dataset to avoid correlated features\n n_samples = 1000\n n_features = 5\n X = rng.randn(n_samples, n_features)\n y = rng.randn(n_samples) * 10\n\n # The 'init' estimator for GBDT (here the average prediction) isn't taken\n # into account with the recursion method, for technical reasons. We set\n # the mean to 0 to that this 'bug' doesn't have any effect.\n y = y - y.mean()\n\n # set max_depth not too high to avoid splits with same gain but different\n # features\n max_depth = 5\n\n tree_seed = 0\n forest = RandomForestRegressor(n_estimators=1, max_features=None,\n bootstrap=False, max_depth=max_depth,\n random_state=tree_seed)\n # The forest will use ensemble.base._set_random_states to set the\n # random_state of the tree sub-estimator. 
We simulate this here to have\n # equivalent estimators.\n equiv_random_state = check_random_state(tree_seed).randint(\n np.iinfo(np.int32).max)\n gbdt = GradientBoostingRegressor(n_estimators=1, learning_rate=1,\n criterion='mse', max_depth=max_depth,\n random_state=equiv_random_state)\n tree = DecisionTreeRegressor(max_depth=max_depth,\n random_state=equiv_random_state)\n\n forest.fit(X, y)\n gbdt.fit(X, y)\n tree.fit(X, y)\n\n # sanity check: if the trees aren't the same, the PD values won't be equal\n try:\n assert_is_subtree(tree.tree_, gbdt[0, 0].tree_)\n assert_is_subtree(tree.tree_, forest[0].tree_)\n except AssertionError:\n # For some reason the trees aren't exactly equal on 32bits, so the PDs\n # cannot be equal either. See\n # https://github.com/scikit-learn/scikit-learn/issues/8853\n assert _IS_32BIT, \"this should only fail on 32 bit platforms\"\n return\n\n grid = rng.randn(50).reshape(-1, 1)\n for f in range(n_features):\n features = np.array([f], dtype=np.int32)\n\n pdp_forest = _partial_dependence_recursion(forest, grid, features)\n pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features)\n pdp_tree = _partial_dependence_recursion(tree, grid, features)\n\n np.testing.assert_allclose(pdp_gbdt, pdp_tree)\n np.testing.assert_allclose(pdp_forest, pdp_tree)\n\n\[email protected]('est', (\n GradientBoostingClassifier(random_state=0),\n HistGradientBoostingClassifier(random_state=0),\n))\[email protected]('target_feature', (0, 1, 2, 3, 4, 5))\ndef test_recursion_decision_function(est, target_feature):\n # Make sure the recursion method (implicitly uses decision_function) has\n # the same result as using brute method with\n # response_method=decision_function\n\n X, y = make_classification(n_classes=2, n_clusters_per_class=1,\n random_state=1)\n assert np.mean(y) == .5 # make sure the init estimator predicts 0 anyway\n\n est.fit(X, y)\n\n preds_1 = partial_dependence(\n est, X, [target_feature], response_method='decision_function',\n method='recursion', kind='average'\n )\n preds_2 = partial_dependence(\n est, X, [target_feature], response_method='decision_function',\n method='brute', kind='average'\n )\n\n assert_allclose(preds_1['average'], preds_2['average'], atol=1e-7)\n\n\[email protected]('est', (\n LinearRegression(),\n GradientBoostingRegressor(random_state=0),\n HistGradientBoostingRegressor(random_state=0, min_samples_leaf=1,\n max_leaf_nodes=None, max_iter=1),\n DecisionTreeRegressor(random_state=0),\n))\[email protected]('power', (1, 2))\ndef test_partial_dependence_easy_target(est, power):\n # If the target y only depends on one feature in an obvious way (linear or\n # quadratic) then the partial dependence for that feature should reflect\n # it.\n # We here fit a linear regression_data model (with polynomial features if\n # needed) and compute r_squared to check that the partial dependence\n # correctly reflects the target.\n\n rng = np.random.RandomState(0)\n n_samples = 200\n target_variable = 2\n X = rng.normal(size=(n_samples, 5))\n y = X[:, target_variable]**power\n\n est.fit(X, y)\n\n pdp = partial_dependence(\n est, features=[target_variable], X=X, grid_resolution=1000,\n kind='average'\n )\n\n new_X = pdp[\"values\"][0].reshape(-1, 1)\n new_y = pdp['average'][0]\n # add polynomial features if needed\n new_X = PolynomialFeatures(degree=power).fit_transform(new_X)\n\n lr = LinearRegression().fit(new_X, new_y)\n r2 = r2_score(new_y, lr.predict(new_X))\n\n assert r2 > .99\n\n\[email protected]('Estimator',\n (sklearn.tree.DecisionTreeClassifier,\n 
sklearn.tree.ExtraTreeClassifier,\n sklearn.ensemble.ExtraTreesClassifier,\n sklearn.neighbors.KNeighborsClassifier,\n sklearn.neighbors.RadiusNeighborsClassifier,\n sklearn.ensemble.RandomForestClassifier))\ndef test_multiclass_multioutput(Estimator):\n # Make sure error is raised for multiclass-multioutput classifiers\n\n # make multiclass-multioutput dataset\n X, y = make_classification(n_classes=3, n_clusters_per_class=1,\n random_state=0)\n y = np.array([y, y]).T\n\n est = Estimator()\n est.fit(X, y)\n\n with pytest.raises(\n ValueError,\n match=\"Multiclass-multioutput estimators are not supported\"):\n partial_dependence(est, X, [0])\n\n\nclass NoPredictProbaNoDecisionFunction(ClassifierMixin, BaseEstimator):\n def fit(self, X, y):\n # simulate that we have some classes\n self.classes_ = [0, 1]\n return self\n\n\[email protected](\"ignore:A Bunch will be returned\")\[email protected](\n \"estimator, params, err_msg\",\n [(KMeans(),\n {'features': [0]},\n \"'estimator' must be a fitted regressor or classifier\"),\n (LinearRegression(),\n {'features': [0], 'response_method': 'predict_proba'},\n 'The response_method parameter is ignored for regressors'),\n (GradientBoostingClassifier(random_state=0),\n {'features': [0], 'response_method': 'predict_proba',\n 'method': 'recursion'},\n \"'recursion' method, the response_method must be 'decision_function'\"),\n (GradientBoostingClassifier(random_state=0),\n {'features': [0], 'response_method': 'predict_proba', 'method': 'auto'},\n \"'recursion' method, the response_method must be 'decision_function'\"),\n (GradientBoostingClassifier(random_state=0),\n {'features': [0], 'response_method': 'blahblah'},\n 'response_method blahblah is invalid. Accepted response_method'),\n (NoPredictProbaNoDecisionFunction(),\n {'features': [0], 'response_method': 'auto'},\n 'The estimator has no predict_proba and no decision_function method'),\n (NoPredictProbaNoDecisionFunction(),\n {'features': [0], 'response_method': 'predict_proba'},\n 'The estimator has no predict_proba method.'),\n (NoPredictProbaNoDecisionFunction(),\n {'features': [0], 'response_method': 'decision_function'},\n 'The estimator has no decision_function method.'),\n (LinearRegression(),\n {'features': [0], 'method': 'blahblah'},\n 'blahblah is invalid. 
Accepted method names are brute, recursion, auto'),\n (LinearRegression(),\n {'features': [0], 'method': 'recursion', 'kind': 'individual'},\n \"The 'recursion' method only applies when 'kind' is set to 'average'\"),\n (LinearRegression(),\n {'features': [0], 'method': 'recursion', 'kind': 'both'},\n \"The 'recursion' method only applies when 'kind' is set to 'average'\"),\n (LinearRegression(),\n {'features': [0], 'method': 'recursion'},\n \"Only the following estimators support the 'recursion' method:\")]\n)\ndef test_partial_dependence_error(estimator, params, err_msg):\n X, y = make_classification(random_state=0)\n estimator.fit(X, y)\n\n with pytest.raises(ValueError, match=err_msg):\n partial_dependence(estimator, X, **params)\n\n\[email protected](\n \"with_dataframe, err_msg\",\n [(True, \"Only array-like or scalar are supported\"),\n (False, \"Only array-like or scalar are supported\")]\n)\ndef test_partial_dependence_slice_error(with_dataframe, err_msg):\n X, y = make_classification(random_state=0)\n if with_dataframe:\n pd = pytest.importorskip('pandas')\n X = pd.DataFrame(X)\n estimator = LogisticRegression().fit(X, y)\n\n with pytest.raises(TypeError, match=err_msg):\n partial_dependence(estimator, X, features=slice(0, 2, 1))\n\n\[email protected](\n 'estimator',\n [LinearRegression(), GradientBoostingClassifier(random_state=0)]\n)\[email protected]('features', [-1, 10000])\ndef test_partial_dependence_unknown_feature_indices(estimator, features):\n X, y = make_classification(random_state=0)\n estimator.fit(X, y)\n\n err_msg = 'all features must be in'\n with pytest.raises(ValueError, match=err_msg):\n partial_dependence(estimator, X, [features])\n\n\[email protected](\n 'estimator',\n [LinearRegression(), GradientBoostingClassifier(random_state=0)]\n)\ndef test_partial_dependence_unknown_feature_string(estimator):\n pd = pytest.importorskip(\"pandas\")\n X, y = make_classification(random_state=0)\n df = pd.DataFrame(X)\n estimator.fit(df, y)\n\n features = ['random']\n err_msg = 'A given column is not a column of the dataframe'\n with pytest.raises(ValueError, match=err_msg):\n partial_dependence(estimator, df, features)\n\n\[email protected](\n 'estimator',\n [LinearRegression(), GradientBoostingClassifier(random_state=0)]\n)\ndef test_partial_dependence_X_list(estimator):\n # check that array-like objects are accepted\n X, y = make_classification(random_state=0)\n estimator.fit(X, y)\n partial_dependence(estimator, list(X), [0], kind='average')\n\n\ndef test_warning_recursion_non_constant_init():\n # make sure that passing a non-constant init parameter to a GBDT and using\n # recursion method yields a warning.\n\n gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0)\n gbc.fit(X, y)\n\n with pytest.warns(\n UserWarning,\n match='Using recursion method with a non-constant init predictor'):\n partial_dependence(gbc, X, [0], method='recursion', kind='average')\n\n with pytest.warns(\n UserWarning,\n match='Using recursion method with a non-constant init predictor'):\n partial_dependence(gbc, X, [0], method='recursion', kind='average')\n\n\ndef test_partial_dependence_sample_weight():\n # Test near perfect correlation between partial dependence and diagonal\n # when sample weights emphasize y = x predictions\n # non-regression test for #13193\n # TODO: extend to HistGradientBoosting once sample_weight is supported\n N = 1000\n rng = np.random.RandomState(123456)\n mask = rng.randint(2, size=N, dtype=bool)\n\n x = rng.rand(N)\n # set y = x on mask and y = -x 
outside\n y = x.copy()\n y[~mask] = -y[~mask]\n X = np.c_[mask, x]\n # sample weights to emphasize data points where y = x\n sample_weight = np.ones(N)\n sample_weight[mask] = 1000.\n\n clf = GradientBoostingRegressor(n_estimators=10, random_state=1)\n clf.fit(X, y, sample_weight=sample_weight)\n\n pdp = partial_dependence(clf, X, features=[1], kind='average')\n\n assert np.corrcoef(pdp['average'], pdp[\"values\"])[0, 1] > 0.99\n\n\ndef test_hist_gbdt_sw_not_supported():\n # TODO: remove/fix when PDP supports HGBT with sample weights\n clf = HistGradientBoostingRegressor(random_state=1)\n clf.fit(X, y, sample_weight=np.ones(len(X)))\n\n with pytest.raises(NotImplementedError,\n match=\"does not support partial dependence\"):\n partial_dependence(clf, X, features=[1])\n\n\ndef test_partial_dependence_pipeline():\n # check that the partial dependence support pipeline\n iris = load_iris()\n\n scaler = StandardScaler()\n clf = DummyClassifier(random_state=42)\n pipe = make_pipeline(scaler, clf)\n\n clf.fit(scaler.fit_transform(iris.data), iris.target)\n pipe.fit(iris.data, iris.target)\n\n features = 0\n pdp_pipe = partial_dependence(\n pipe, iris.data, features=[features], grid_resolution=10,\n kind='average'\n )\n pdp_clf = partial_dependence(\n clf, scaler.transform(iris.data), features=[features],\n grid_resolution=10, kind='average'\n )\n assert_allclose(pdp_pipe['average'], pdp_clf['average'])\n assert_allclose(\n pdp_pipe[\"values\"][0],\n pdp_clf[\"values\"][0] * scaler.scale_[features] + scaler.mean_[features]\n )\n\n\[email protected](\n \"estimator\",\n [LogisticRegression(max_iter=1000, random_state=0),\n GradientBoostingClassifier(random_state=0, n_estimators=5)],\n ids=['estimator-brute', 'estimator-recursion']\n)\[email protected](\n \"preprocessor\",\n [None,\n make_column_transformer(\n (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),\n (RobustScaler(), [iris.feature_names[i] for i in (1, 3)])),\n make_column_transformer(\n (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),\n remainder='passthrough')],\n ids=['None', 'column-transformer', 'column-transformer-passthrough']\n)\[email protected](\n \"features\",\n [[0, 2], [iris.feature_names[i] for i in (0, 2)]],\n ids=['features-integer', 'features-string']\n)\ndef test_partial_dependence_dataframe(estimator, preprocessor, features):\n # check that the partial dependence support dataframe and pipeline\n # including a column transformer\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(scale(iris.data), columns=iris.feature_names)\n\n pipe = make_pipeline(preprocessor, estimator)\n pipe.fit(df, iris.target)\n pdp_pipe = partial_dependence(\n pipe, df, features=features, grid_resolution=10, kind='average'\n )\n\n # the column transformer will reorder the column when transforming\n # we mixed the index to be sure that we are computing the partial\n # dependence of the right columns\n if preprocessor is not None:\n X_proc = clone(preprocessor).fit_transform(df)\n features_clf = [0, 1]\n else:\n X_proc = df\n features_clf = [0, 2]\n\n clf = clone(estimator).fit(X_proc, iris.target)\n pdp_clf = partial_dependence(\n clf, X_proc, features=features_clf, method='brute', grid_resolution=10,\n kind='average'\n )\n\n assert_allclose(pdp_pipe['average'], pdp_clf['average'])\n if preprocessor is not None:\n scaler = preprocessor.named_transformers_['standardscaler']\n assert_allclose(\n pdp_pipe[\"values\"][1],\n pdp_clf[\"values\"][1] * scaler.scale_[1] + scaler.mean_[1]\n )\n else:\n 
assert_allclose(pdp_pipe[\"values\"][1], pdp_clf[\"values\"][1])\n\n\[email protected](\n \"features, expected_pd_shape\",\n [(0, (3, 10)),\n (iris.feature_names[0], (3, 10)),\n ([0, 2], (3, 10, 10)),\n ([iris.feature_names[i] for i in (0, 2)], (3, 10, 10)),\n ([True, False, True, False], (3, 10, 10))],\n ids=['scalar-int', 'scalar-str', 'list-int', 'list-str', 'mask']\n)\ndef test_partial_dependence_feature_type(features, expected_pd_shape):\n # check all possible features type supported in PDP\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(iris.data, columns=iris.feature_names)\n\n preprocessor = make_column_transformer(\n (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),\n (RobustScaler(), [iris.feature_names[i] for i in (1, 3)])\n )\n pipe = make_pipeline(\n preprocessor, LogisticRegression(max_iter=1000, random_state=0)\n )\n pipe.fit(df, iris.target)\n pdp_pipe = partial_dependence(\n pipe, df, features=features, grid_resolution=10, kind='average'\n )\n assert pdp_pipe['average'].shape == expected_pd_shape\n assert len(pdp_pipe[\"values\"]) == len(pdp_pipe['average'].shape) - 1\n\n\[email protected](\n \"estimator\", [LinearRegression(), LogisticRegression(),\n GradientBoostingRegressor(), GradientBoostingClassifier()]\n)\ndef test_partial_dependence_unfitted(estimator):\n X = iris.data\n preprocessor = make_column_transformer(\n (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3])\n )\n pipe = make_pipeline(preprocessor, estimator)\n with pytest.raises(NotFittedError, match=\"is not fitted yet\"):\n partial_dependence(pipe, X, features=[0, 2], grid_resolution=10)\n with pytest.raises(NotFittedError, match=\"is not fitted yet\"):\n partial_dependence(estimator, X, features=[0, 2], grid_resolution=10)\n\n\[email protected]('Estimator, data', [\n (LinearRegression, multioutput_regression_data),\n (LogisticRegression, binary_classification_data)])\ndef test_kind_average_and_average_of_individual(Estimator, data):\n est = Estimator()\n (X, y), n_targets = data\n est.fit(X, y)\n\n pdp_avg = partial_dependence(\n est, X=X, features=[1, 2], kind='average'\n )\n pdp_ind = partial_dependence(\n est, X=X, features=[1, 2], kind='individual'\n )\n avg_ind = np.mean(pdp_ind['individual'], axis=1)\n assert_allclose(avg_ind, pdp_avg['average'])\n\n\ndef test_warning_for_kind_legacy():\n est = LogisticRegression()\n (X, y), n_targets = binary_classification_data\n est.fit(X, y)\n\n err_msg = (\"A Bunch will be returned in place of 'predictions' from \"\n \"version 1.1\")\n with pytest.warns(FutureWarning, match=err_msg):\n partial_dependence(est, X=X, features=[1, 2])\n\n with pytest.warns(FutureWarning, match=err_msg):\n partial_dependence(est, X=X, features=[1, 2], kind='legacy')\n" ]
[ [ "numpy.ones", "numpy.sum", "numpy.find_common_type", "scipy.linalg.lstsq", "numpy.asarray", "scipy.sparse.linalg.LinearOperator", "numpy.ascontiguousarray", "numpy.vstack", "scipy.sparse.dia_matrix", "scipy.optimize.nnls", "numpy.average", "numpy.zeros", "numpy.empty", "scipy.sparse.issparse", "scipy.sparse.csr_matrix", "numpy.ravel", "numpy.iinfo", "scipy.special.expit", "scipy.sparse.isspmatrix", "numpy.sqrt", "numpy.dot", "numpy.full" ], [ "numpy.ones", "sklearn.inspection._partial_dependence._partial_dependence_brute", "sklearn.inspection.partial_dependence", "sklearn.tree.DecisionTreeRegressor", "numpy.asarray", "sklearn.datasets.make_regression", "numpy.random.RandomState", "sklearn.ensemble.RandomForestRegressor", "sklearn.cluster.KMeans", "sklearn.linear_model.LogisticRegression", "sklearn.inspection._partial_dependence._partial_dependence_recursion", "sklearn.ensemble.GradientBoostingRegressor", "sklearn.datasets.load_iris", "numpy.allclose", "sklearn.base.clone", "sklearn.utils._testing.assert_array_equal", "sklearn.utils.validation.check_random_state", "sklearn.linear_model.LinearRegression", "sklearn.preprocessing.scale", "sklearn.tree.tests.test_tree.assert_is_subtree", "sklearn.preprocessing.RobustScaler", "sklearn.ensemble.HistGradientBoostingClassifier", "sklearn.dummy.DummyClassifier", "numpy.mean", "numpy.corrcoef", "sklearn.utils._testing.assert_allclose", "sklearn.pipeline.make_pipeline", "sklearn.preprocessing.StandardScaler", "sklearn.ensemble.HistGradientBoostingRegressor", "sklearn.datasets.make_classification", "sklearn.inspection._partial_dependence._grid_from_X", "numpy.iinfo", "numpy.testing.assert_allclose", "numpy.array", "sklearn.ensemble.GradientBoostingClassifier", "sklearn.preprocessing.PolynomialFeatures" ] ]
zysilence/tensorforce
[ "7539e5dde66f3a93b881006f9b7f38c926ced21b", "7539e5dde66f3a93b881006f9b7f38c926ced21b" ]
[ "tensorforce/core/memories/latest.py", "tensorforce/core/memories/queue.py" ]
[ "# Copyright 2017 reinforce.io. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\n\nfrom tensorforce.core.memories import Queue\n\n\nclass Latest(Queue):\n \"\"\"\n Memory which always retrieves most recent experiences.\n \"\"\"\n\n def __init__(self, states, internals, actions, include_next_states, capacity, scope='latest', summary_labels=None):\n \"\"\"\n Latest memory.\n\n Args:\n states: States specifiction.\n internals: Internal states specification.\n actions: Actions specification.\n include_next_states: Include subsequent state if true.\n capacity: Memory capacity.\n \"\"\"\n super(Latest, self).__init__(\n states=states,\n internals=internals,\n actions=actions,\n include_next_states=include_next_states,\n capacity=capacity,\n scope=scope,\n summary_labels=summary_labels\n )\n\n def tf_retrieve_timesteps(self, n):\n num_timesteps = (self.memory_index - self.episode_indices[-1] - 2) % self.capacity + 1\n n = tf.minimum(x=n, y=num_timesteps)\n indices = tf.range(\n start=(self.memory_index - n),\n limit=self.memory_index\n ) % self.capacity\n return self.retrieve_indices(indices=indices)\n\n def tf_retrieve_episodes(self, n):\n n = tf.minimum(x=n, y=self.episode_count)\n start = self.episode_indices[self.episode_count - n - 1] + 1\n limit = self.episode_indices[self.episode_count - 1] + 1\n limit += tf.where(condition=(start < limit), x=0, y=self.capacity)\n indices = tf.range(start=start, limit=limit) % self.capacity\n return self.retrieve_indices(indices=indices)\n\n def tf_retrieve_sequences(self, n, sequence_length):\n # Remove once #128 is resolved\n tf.logging.warn(\"Sampling sequences is not validated yet. 
Use timesteps or episodes instead.\")\n num_sequences = (self.memory_index - self.episode_indices[-1] - 2 - sequence_length + 1) % self.capacity + 1\n n = tf.minimum(x=n, y=num_sequences)\n indices = tf.range(\n start=(self.memory_index - n - sequence_length), # or '- 1' implied in sequence length?\n limit=self.memory_index\n ) % self.capacity\n # sequence_indices = [tf.range(start=indices[n], limit=(indices[n] + sequence_length)) for k in range(n)]\n # sequence_indices = [indices[k: k + sequence_length] for k in tf.unstack(value=tf.range(start=0, limit=n), num=n)]\n sequence_indices = tf.expand_dims(input=tf.range(start=0, limit=n), axis=1) + tf.expand_dims(input=tf.constant(value=list(range(sequence_length))), axis=0)\n sequence_indices = tf.reshape(tensor=sequence_indices, shape=(n * sequence_length,))\n # sequence_indices = tf.concat(values=sequence_indices, axis=0) # tf.stack !!!!!\n terminal = tf.gather(params=self.terminal_memory, indices=indices)\n sequence_indices = tf.boolean_mask(tensor=sequence_indices, mask=tf.logical_not(x=terminal))\n return self.retrieve_indices(indices=sequence_indices)\n", "# Copyright 2017 reinforce.io. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\n\nfrom tensorforce import util\nfrom tensorforce.core.memories import Memory\n\n\nclass Queue(Memory):\n \"\"\"\n Base class for memories organized as a queue (FIFO).\n \"\"\"\n\n def __init__(self, states, internals, actions, include_next_states, capacity, scope='queue', summary_labels=None):\n \"\"\"\n Queue memory.\n\n Args:\n capacity: Memory capacity.\n \"\"\"\n self.capacity = capacity\n self.scope = scope\n\n # Pieces of the records are stored in different tensors:\n self.states_memory = dict() # keys=state space components\n self.internals_memory = dict() # keys=internal state components\n self.actions_memory = dict() # keys=action space components\n self.terminal_memory = None # 1D tensor\n self.reward_memory = None # 1D tensor\n self.memory_index = None # 0D (int) tensor (points to the next record to be overwritten)\n self.episode_indices = None # 1D tensor of indexes where episodes start.\n self.episode_count = None # 0D (int) tensor: How many episodes do we have stored?\n\n self.retrieve_indices = None\n\n super(Queue, self).__init__(\n states=states,\n internals=internals,\n actions=actions,\n include_next_states=include_next_states,\n scope=scope,\n summary_labels=summary_labels\n )\n\n def setup_template_funcs(self, custom_getter=None):\n custom_getter = super(Queue, self).setup_template_funcs(custom_getter=custom_getter)\n\n self.retrieve_indices = tf.make_template(\n name_=(self.scope + '/retrieve_indices'),\n func_=self.tf_retrieve_indices,\n custom_getter_=custom_getter\n )\n\n def tf_initialize(self):\n # States\n for name in 
sorted(self.states_spec):\n state = self.states_spec[name]\n self.states_memory[name] = tf.get_variable(\n name=('state-' + name),\n shape=(self.capacity,) + tuple(state['shape']),\n dtype=util.tf_dtype(state['type']),\n trainable=False\n )\n\n # Internals\n for name in sorted(self.internals_spec):\n internal = self.internals_spec[name]\n self.internals_memory[name] = tf.get_variable(\n name=('internal-' + name),\n shape=(self.capacity,) + tuple(internal['shape']),\n dtype=util.tf_dtype(internal['type']),\n trainable=False\n )\n\n # Actions\n for name in sorted(self.actions_spec):\n action = self.actions_spec[name]\n self.actions_memory[name] = tf.get_variable(\n name=('action-' + name),\n shape=(self.capacity,) + tuple(action['shape']),\n dtype=util.tf_dtype(action['type']),\n trainable=False\n )\n\n # Terminal\n self.terminal_memory = tf.get_variable(\n name='terminal',\n shape=(self.capacity,),\n dtype=util.tf_dtype('bool'),\n initializer=tf.constant_initializer(\n value=False,\n dtype=util.tf_dtype('bool')\n ),\n trainable=False\n )\n\n # Reward\n self.reward_memory = tf.get_variable(\n name='reward',\n shape=(self.capacity,),\n dtype=util.tf_dtype('float'),\n trainable=False\n )\n\n # Memory index\n self.memory_index = tf.get_variable(\n name='memory-index',\n dtype=util.tf_dtype('int'),\n initializer=0,\n trainable=False\n )\n\n # Episode indices\n self.episode_indices = tf.get_variable(\n name='episode-indices',\n shape=(self.capacity + 1,),\n dtype=util.tf_dtype('int'),\n initializer=tf.constant_initializer(value=(self.capacity - 1), dtype=util.tf_dtype('int')),\n trainable=False\n )\n\n # Episodes index\n self.episode_count = tf.get_variable(\n name='episode-count',\n dtype=util.tf_dtype('int'),\n initializer=0,\n trainable=False\n )\n\n def tf_store(self, states, internals, actions, terminal, reward):\n # Memory indices to overwrite.\n num_instances = tf.shape(input=terminal)[0]\n with tf.control_dependencies([tf.assert_less_equal(num_instances, self.capacity)]):\n indices = tf.range(self.memory_index, self.memory_index + num_instances) % self.capacity\n\n # Remove episode indices.\n num_episodes = tf.count_nonzero(\n input_tensor=tf.gather(params=self.terminal_memory, indices=indices),\n axis=0,\n dtype=util.tf_dtype('int')\n )\n num_episodes = tf.minimum(x=num_episodes, y=self.episode_count)\n assignment = tf.assign(\n ref=self.episode_indices[:self.episode_count - num_episodes],\n value=self.episode_indices[num_episodes: self.episode_count]\n )\n\n # Decrement episode count.\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignment = tf.assign_sub(ref=self.episode_count, value=num_episodes)\n\n # Assign new observations.\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignments = list()\n for name in sorted(states):\n assignments.append(tf.scatter_update(\n ref=self.states_memory[name],\n indices=indices,\n updates=states[name]\n ))\n for name in sorted(internals):\n assignments.append(tf.scatter_update(\n ref=self.internals_memory[name],\n indices=indices,\n updates=internals[name]\n ))\n for name in sorted(actions):\n assignments.append(tf.scatter_update(\n ref=self.actions_memory[name],\n indices=indices,\n updates=actions[name]\n ))\n assignments.append(tf.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal))\n assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward))\n\n # Add episode indices.\n with tf.control_dependencies(control_inputs=assignments):\n num_episodes = 
tf.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int'))\n assignment = tf.assign(\n ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes],\n value=tf.boolean_mask(tensor=indices, mask=terminal)\n )\n\n # Increment episode count.\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignment = tf.assign_add(ref=self.episode_count, value=num_episodes)\n\n # Increment memory index.\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignment = tf.assign(\n ref=self.episode_indices[-1],\n value=tf.where(self.memory_index + num_instances > self.capacity,\n self.episode_indices[self.episode_count - 1], self.capacity - 1)\n )\n\n with tf.control_dependencies(control_inputs=(assignment,)):\n assignment = tf.assign(ref=self.memory_index, value=((self.memory_index + num_instances) % self.capacity))\n\n with tf.control_dependencies(control_inputs=(assignment,)):\n return tf.no_op()\n\n def tf_retrieve_indices(self, indices):\n \"\"\"\n Fetches experiences for given indices.\n\n Args:\n indices: Index tensor\n\n Returns: Batch of experiences\n \"\"\"\n states = dict()\n for name in sorted(self.states_memory):\n states[name] = tf.gather(params=self.states_memory[name], indices=indices)\n\n internals = dict()\n for name in sorted(self.internals_memory):\n internals[name] = tf.gather(params=self.internals_memory[name], indices=indices)\n\n actions = dict()\n for name in sorted(self.actions_memory):\n actions[name] = tf.gather(params=self.actions_memory[name], indices=indices)\n\n terminal = tf.gather(params=self.terminal_memory, indices=indices)\n reward = tf.gather(params=self.reward_memory, indices=indices)\n\n if self.include_next_states:\n assert util.rank(indices) == 1\n next_indices = (indices + 1) % self.capacity\n\n next_states = dict()\n for name in sorted(self.states_memory):\n next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)\n\n next_internals = dict()\n for name in sorted(self.internals_memory):\n next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)\n\n return dict(\n states=states,\n internals=internals,\n actions=actions,\n terminal=terminal,\n reward=reward,\n next_states=next_states,\n next_internals=next_internals\n )\n else:\n return dict(\n states=states,\n internals=internals,\n actions=actions,\n terminal=terminal,\n reward=reward\n )\n" ]
[ [ "tensorflow.logging.warn", "tensorflow.minimum", "tensorflow.reshape", "tensorflow.logical_not", "tensorflow.range", "tensorflow.where", "tensorflow.gather" ], [ "tensorflow.assign_sub", "tensorflow.gather", "tensorflow.minimum", "tensorflow.shape", "tensorflow.assign_add", "tensorflow.no_op", "tensorflow.range", "tensorflow.assert_less_equal", "tensorflow.assign", "tensorflow.where", "tensorflow.scatter_update", "tensorflow.boolean_mask", "tensorflow.make_template", "tensorflow.control_dependencies" ] ]
jjhelmus/scikit-image
[ "b9b5fde0821fe8bcece2528b30d012c65c64ad6f", "b9b5fde0821fe8bcece2528b30d012c65c64ad6f" ]
[ "skimage/transform/radon_transform.py", "skimage/measure/tests/test_regionprops.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nradon.py - Radon and inverse radon transforms\n\nBased on code of Justin K. Romberg\n(http://www.clear.rice.edu/elec431/projects96/DSP/bpanalysis.html)\nJ. Gillam and Chris Griffin.\n\nReferences:\n -B.R. Ramesh, N. Srinivasa, K. Rajgopal, \"An Algorithm for Computing\n the Discrete Radon Transform With Some Applications\", Proceedings of\n the Fourth IEEE Region 10 International Conference, TENCON '89, 1989.\n -A. C. Kak, Malcolm Slaney, \"Principles of Computerized Tomographic\n Imaging\", IEEE Press 1988.\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nfrom scipy.fftpack import fft, ifft, fftfreq\nfrom scipy.interpolate import interp1d\nfrom ._warps_cy import _warp_fast\nfrom ._radon_transform import sart_projection_update\nfrom .. import util\nfrom warnings import warn\n\n\n__all__ = [\"radon\", \"iradon\", \"iradon_sart\"]\n\n\ndef radon(image, theta=None, circle=False):\n \"\"\"\n Calculates the radon transform of an image given specified\n projection angles.\n\n Parameters\n ----------\n image : array_like, dtype=float\n Input image. The rotation axis will be located in the pixel with\n indices ``(image.shape[0] // 2, image.shape[1] // 2)``.\n theta : array_like, dtype=float, optional (default np.arange(180))\n Projection angles (in degrees).\n circle : boolean, optional\n Assume image is zero outside the inscribed circle, making the\n width of each projection (the first dimension of the sinogram)\n equal to ``min(image.shape)``.\n\n Returns\n -------\n radon_image : ndarray\n Radon transform (sinogram). The tomography rotation axis will lie\n at the pixel index ``radon_image.shape[0] // 2`` along the 0th\n dimension of ``radon_image``.\n\n \"\"\"\n if image.ndim != 2:\n raise ValueError('The input image must be 2-D')\n if theta is None:\n theta = np.arange(180)\n\n if circle:\n radius = min(image.shape) // 2\n c0, c1 = np.ogrid[0:image.shape[0], 0:image.shape[1]]\n reconstruction_circle = ((c0 - image.shape[0] // 2) ** 2\n + (c1 - image.shape[1] // 2) ** 2)\n reconstruction_circle = reconstruction_circle <= radius ** 2\n if not np.all(reconstruction_circle | (image == 0)):\n warn('Radon transform: image must be zero outside the '\n 'reconstruction circle')\n # Crop image to make it square\n slices = []\n for d in (0, 1):\n if image.shape[d] > min(image.shape):\n excess = image.shape[d] - min(image.shape)\n slices.append(slice(int(np.ceil(excess / 2)),\n int(np.ceil(excess / 2)\n + min(image.shape))))\n else:\n slices.append(slice(None))\n slices = tuple(slices)\n padded_image = image[slices]\n else:\n diagonal = np.sqrt(2) * max(image.shape)\n pad = [int(np.ceil(diagonal - s)) for s in image.shape]\n new_center = [(s + p) // 2 for s, p in zip(image.shape, pad)]\n old_center = [s // 2 for s in image.shape]\n pad_before = [nc - oc for oc, nc in zip(old_center, new_center)]\n pad_width = [(pb, p - pb) for pb, p in zip(pad_before, pad)]\n padded_image = util.pad(image, pad_width, mode='constant',\n constant_values=0)\n # padded_image is always square\n assert padded_image.shape[0] == padded_image.shape[1]\n radon_image = np.zeros((padded_image.shape[0], len(theta)))\n center = padded_image.shape[0] // 2\n\n shift0 = np.array([[1, 0, -center],\n [0, 1, -center],\n [0, 0, 1]])\n shift1 = np.array([[1, 0, center],\n [0, 1, center],\n [0, 0, 1]])\n\n def build_rotation(theta):\n T = np.deg2rad(theta)\n R = np.array([[np.cos(T), np.sin(T), 0],\n [-np.sin(T), np.cos(T), 0],\n [0, 0, 1]])\n return shift1.dot(R).dot(shift0)\n\n for i in 
range(len(theta)):\n rotated = _warp_fast(padded_image, build_rotation(theta[i]))\n radon_image[:, i] = rotated.sum(0)\n return radon_image\n\n\ndef _sinogram_circle_to_square(sinogram):\n diagonal = int(np.ceil(np.sqrt(2) * sinogram.shape[0]))\n pad = diagonal - sinogram.shape[0]\n old_center = sinogram.shape[0] // 2\n new_center = diagonal // 2\n pad_before = new_center - old_center\n pad_width = ((pad_before, pad - pad_before), (0, 0))\n return util.pad(sinogram, pad_width, mode='constant', constant_values=0)\n\n\ndef iradon(radon_image, theta=None, output_size=None,\n filter=\"ramp\", interpolation=\"linear\", circle=False):\n \"\"\"\n Inverse radon transform.\n\n Reconstruct an image from the radon transform, using the filtered\n back projection algorithm.\n\n Parameters\n ----------\n radon_image : array_like, dtype=float\n Image containing radon transform (sinogram). Each column of\n the image corresponds to a projection along a different angle. The\n tomography rotation axis should lie at the pixel index\n ``radon_image.shape[0] // 2`` along the 0th dimension of\n ``radon_image``.\n theta : array_like, dtype=float, optional\n Reconstruction angles (in degrees). Default: m angles evenly spaced\n between 0 and 180 (if the shape of `radon_image` is (N, M)).\n output_size : int\n Number of rows and columns in the reconstruction.\n filter : str, optional (default ramp)\n Filter used in frequency domain filtering. Ramp filter used by default.\n Filters available: ramp, shepp-logan, cosine, hamming, hann.\n Assign None to use no filter.\n interpolation : str, optional (default 'linear')\n Interpolation method used in reconstruction. Methods available:\n 'linear', 'nearest', and 'cubic' ('cubic' is slow).\n circle : boolean, optional\n Assume the reconstructed image is zero outside the inscribed circle.\n Also changes the default output_size to match the behaviour of\n ``radon`` called with ``circle=True``.\n\n Returns\n -------\n reconstructed : ndarray\n Reconstructed image. The rotation axis will be located in the pixel\n with indices\n ``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.\n\n Notes\n -----\n It applies the Fourier slice theorem to reconstruct an image by\n multiplying the frequency domain of the filter with the FFT of the\n projection data. 
This algorithm is called filtered back projection.\n\n \"\"\"\n if radon_image.ndim != 2:\n raise ValueError('The input image must be 2-D')\n if theta is None:\n m, n = radon_image.shape\n theta = np.linspace(0, 180, n, endpoint=False)\n else:\n theta = np.asarray(theta)\n if len(theta) != radon_image.shape[1]:\n raise ValueError(\"The given ``theta`` does not match the number of \"\n \"projections in ``radon_image``.\")\n interpolation_types = ('linear', 'nearest', 'cubic')\n if not interpolation in interpolation_types:\n raise ValueError(\"Unknown interpolation: %s\" % interpolation)\n if not output_size:\n # If output size not specified, estimate from input radon image\n if circle:\n output_size = radon_image.shape[0]\n else:\n output_size = int(np.floor(np.sqrt((radon_image.shape[0]) ** 2\n / 2.0)))\n if circle:\n radon_image = _sinogram_circle_to_square(radon_image)\n\n th = (np.pi / 180.0) * theta\n # resize image to next power of two (but no less than 64) for\n # Fourier analysis; speeds up Fourier and lessens artifacts\n projection_size_padded = \\\n max(64, int(2 ** np.ceil(np.log2(2 * radon_image.shape[0]))))\n pad_width = ((0, projection_size_padded - radon_image.shape[0]), (0, 0))\n img = util.pad(radon_image, pad_width, mode='constant', constant_values=0)\n\n # Construct the Fourier filter\n f = fftfreq(projection_size_padded).reshape(-1, 1) # digital frequency\n omega = 2 * np.pi * f # angular frequency\n fourier_filter = 2 * np.abs(f) # ramp filter\n if filter == \"ramp\":\n pass\n elif filter == \"shepp-logan\":\n # Start from first element to avoid divide by zero\n fourier_filter[1:] = fourier_filter[1:] * np.sin(omega[1:]) / omega[1:]\n elif filter == \"cosine\":\n fourier_filter *= np.cos(omega)\n elif filter == \"hamming\":\n fourier_filter *= (0.54 + 0.46 * np.cos(omega / 2))\n elif filter == \"hann\":\n fourier_filter *= (1 + np.cos(omega / 2)) / 2\n elif filter is None:\n fourier_filter[:] = 1\n else:\n raise ValueError(\"Unknown filter: %s\" % filter)\n # Apply filter in Fourier domain\n projection = fft(img, axis=0) * fourier_filter\n radon_filtered = np.real(ifft(projection, axis=0))\n\n # Resize filtered image back to original size\n radon_filtered = radon_filtered[:radon_image.shape[0], :]\n reconstructed = np.zeros((output_size, output_size))\n # Determine the center of the projections (= center of sinogram)\n mid_index = radon_image.shape[0] // 2\n\n [X, Y] = np.mgrid[0:output_size, 0:output_size]\n xpr = X - int(output_size) // 2\n ypr = Y - int(output_size) // 2\n\n # Reconstruct image by interpolation\n for i in range(len(theta)):\n t = ypr * np.cos(th[i]) - xpr * np.sin(th[i])\n x = np.arange(radon_filtered.shape[0]) - mid_index\n if interpolation == 'linear':\n backprojected = np.interp(t, x, radon_filtered[:, i],\n left=0, right=0)\n else:\n interpolant = interp1d(x, radon_filtered[:, i], kind=interpolation,\n bounds_error=False, fill_value=0)\n backprojected = interpolant(t)\n reconstructed += backprojected\n if circle:\n radius = output_size // 2\n reconstruction_circle = (xpr ** 2 + ypr ** 2) <= radius ** 2\n reconstructed[~reconstruction_circle] = 0.\n\n return reconstructed * np.pi / (2 * len(th))\n\n\ndef order_angles_golden_ratio(theta):\n \"\"\"\n Order angles to reduce the amount of correlated information\n in subsequent projections.\n\n Parameters\n ----------\n theta : 1D array of floats\n Projection angles in degrees. 
Duplicate angles are not allowed.\n\n Returns\n -------\n indices_generator : generator yielding unsigned integers\n The returned generator yields indices into ``theta`` such that\n ``theta[indices]`` gives the approximate golden ratio ordering\n of the projections. In total, ``len(theta)`` indices are yielded.\n All non-negative integers < ``len(theta)`` are yielded exactly once.\n\n Notes\n -----\n The method used here is that of the golden ratio introduced\n by T. Kohler.\n\n References\n ----------\n .. [1] Kohler, T. \"A projection access scheme for iterative\n reconstruction based on the golden section.\" Nuclear Science\n Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.\n .. [2] Winkelmann, Stefanie, et al. \"An optimal radial profile order\n based on the Golden Ratio for time-resolved MRI.\"\n Medical Imaging, IEEE Transactions on 26.1 (2007): 68-76.\n \"\"\"\n interval = 180\n\n def angle_distance(a, b):\n difference = a - b\n return min(abs(difference % interval), abs(difference % -interval))\n\n remaining = list(np.argsort(theta)) # indices into theta\n # yield an arbitrary angle to start things off\n index = remaining.pop(0)\n angle = theta[index]\n yield index\n # determine subsequent angles using the golden ratio method\n angle_increment = interval * (1 - (np.sqrt(5) - 1) / 2)\n while remaining:\n angle = (angle + angle_increment) % interval\n insert_point = np.searchsorted(theta[remaining], angle)\n index_below = insert_point - 1\n index_above = 0 if insert_point == len(remaining) else insert_point\n distance_below = angle_distance(angle, theta[remaining[index_below]])\n distance_above = angle_distance(angle, theta[remaining[index_above]])\n if distance_below < distance_above:\n yield remaining.pop(index_below)\n else:\n yield remaining.pop(index_above)\n\n\ndef iradon_sart(radon_image, theta=None, image=None, projection_shifts=None,\n clip=None, relaxation=0.15):\n \"\"\"\n Inverse radon transform\n\n Reconstruct an image from the radon transform, using a single iteration of\n the Simultaneous Algebraic Reconstruction Technique (SART) algorithm.\n\n Parameters\n ----------\n radon_image : 2D array, dtype=float\n Image containing radon transform (sinogram). Each column of\n the image corresponds to a projection along a different angle. The\n tomography rotation axis should lie at the pixel index\n ``radon_image.shape[0] // 2`` along the 0th dimension of\n ``radon_image``.\n theta : 1D array, dtype=float, optional\n Reconstruction angles (in degrees). Default: m angles evenly spaced\n between 0 and 180 (if the shape of `radon_image` is (N, M)).\n image : 2D array, dtype=float, optional\n Image containing an initial reconstruction estimate. Shape of this\n array should be ``(radon_image.shape[0], radon_image.shape[0])``. The\n default is an array of zeros.\n projection_shifts : 1D array, dtype=float\n Shift the projections contained in ``radon_image`` (the sinogram) by\n this many pixels before reconstructing the image. The i'th value\n defines the shift of the i'th column of ``radon_image``.\n clip : length-2 sequence of floats\n Force all values in the reconstructed tomogram to lie in the range\n ``[clip[0], clip[1]]``\n relaxation : float\n Relaxation parameter for the update step. A higher value can\n improve the convergence rate, but one runs the risk of instabilities.\n Values close to or higher than 1 are not recommended.\n\n Returns\n -------\n reconstructed : ndarray\n Reconstructed image. 
The rotation axis will be located in the pixel\n with indices\n ``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.\n\n Notes\n -----\n Algebraic Reconstruction Techniques are based on formulating the tomography\n reconstruction problem as a set of linear equations. Along each ray,\n the projected value is the sum of all the values of the cross section along\n the ray. A typical feature of SART (and a few other variants of algebraic\n techniques) is that it samples the cross section at equidistant points\n along the ray, using linear interpolation between the pixel values of the\n cross section. The resulting set of linear equations are then solved using\n a slightly modified Kaczmarz method.\n\n When using SART, a single iteration is usually sufficient to obtain a good\n reconstruction. Further iterations will tend to enhance high-frequency\n information, but will also often increase the noise.\n\n References\n ----------\n .. [1] AC Kak, M Slaney, \"Principles of Computerized Tomographic\n Imaging\", IEEE Press 1988.\n .. [2] AH Andersen, AC Kak, \"Simultaneous algebraic reconstruction\n technique (SART): a superior implementation of the ART algorithm\",\n Ultrasonic Imaging 6 pp 81--94 (1984)\n .. [3] S Kaczmarz, \"Angenäherte auflösung von systemen linearer\n gleichungen\", Bulletin International de l’Academie Polonaise des\n Sciences et des Lettres 35 pp 355--357 (1937)\n .. [4] Kohler, T. \"A projection access scheme for iterative\n reconstruction based on the golden section.\" Nuclear Science\n Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.\n .. [5] Kaczmarz' method, Wikipedia,\n http://en.wikipedia.org/wiki/Kaczmarz_method\n \"\"\"\n if radon_image.ndim != 2:\n raise ValueError('radon_image must be two dimensional')\n reconstructed_shape = (radon_image.shape[0], radon_image.shape[0])\n if theta is None:\n theta = np.linspace(0, 180, radon_image.shape[1], endpoint=False)\n elif theta.shape != (radon_image.shape[1],):\n raise ValueError('Shape of theta (%s) does not match the '\n 'number of projections (%d)'\n % (projection_shifts.shape, radon_image.shape[1]))\n if image is None:\n image = np.zeros(reconstructed_shape, dtype=np.float)\n elif image.shape != reconstructed_shape:\n raise ValueError('Shape of image (%s) does not match first dimension '\n 'of radon_image (%s)'\n % (image.shape, reconstructed_shape))\n if projection_shifts is None:\n projection_shifts = np.zeros((radon_image.shape[1],), dtype=np.float)\n elif projection_shifts.shape != (radon_image.shape[1],):\n raise ValueError('Shape of projection_shifts (%s) does not match the '\n 'number of projections (%d)'\n % (projection_shifts.shape, radon_image.shape[1]))\n if not clip is None:\n if len(clip) != 2:\n raise ValueError('clip must be a length-2 sequence')\n clip = (float(clip[0]), float(clip[1]))\n relaxation = float(relaxation)\n\n for angle_index in order_angles_golden_ratio(theta):\n image_update = sart_projection_update(image, theta[angle_index],\n radon_image[:, angle_index],\n projection_shifts[angle_index])\n image += relaxation * image_update\n if not clip is None:\n image = np.clip(image, clip[0], clip[1])\n return image\n", "from numpy.testing import assert_array_equal, assert_almost_equal, \\\n assert_array_almost_equal, assert_raises, assert_equal\nimport numpy as np\nimport math\n\nfrom skimage.measure._regionprops import (regionprops, PROPS, perimeter,\n _parse_docs)\n\n\nSAMPLE = np.array(\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 
1, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1],\n [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]\n)\nINTENSITY_SAMPLE = SAMPLE.copy()\nINTENSITY_SAMPLE[1, 9:11] = 2\n\nSAMPLE_3D = np.zeros((6, 6, 6), dtype=np.uint8)\nSAMPLE_3D[1:3, 1:3, 1:3] = 1\nSAMPLE_3D[3, 2, 2] = 1 \nINTENSITY_SAMPLE_3D = SAMPLE_3D.copy()\n\ndef test_all_props():\n region = regionprops(SAMPLE, INTENSITY_SAMPLE)[0]\n for prop in PROPS:\n assert_almost_equal(region[prop], getattr(region, PROPS[prop]))\n\n\ndef test_all_props_3d():\n region = regionprops(SAMPLE_3D, INTENSITY_SAMPLE_3D)[0]\n for prop in PROPS:\n try:\n assert_almost_equal(region[prop], getattr(region, PROPS[prop]))\n except NotImplementedError:\n pass\n\ndef test_dtype():\n regionprops(np.zeros((10, 10), dtype=np.int))\n regionprops(np.zeros((10, 10), dtype=np.uint))\n assert_raises((TypeError), regionprops,\n np.zeros((10, 10), dtype=np.float))\n assert_raises((TypeError), regionprops,\n np.zeros((10, 10), dtype=np.double))\n\n\ndef test_ndim():\n regionprops(np.zeros((10, 10), dtype=np.int))\n regionprops(np.zeros((10, 10, 1), dtype=np.int))\n regionprops(np.zeros((10, 10, 1, 1), dtype=np.int))\n regionprops(np.zeros((10, 10, 10), dtype=np.int))\n assert_raises(TypeError, regionprops, np.zeros((10, 10, 10, 2), dtype=np.int))\n\n\ndef test_area():\n area = regionprops(SAMPLE)[0].area\n assert area == np.sum(SAMPLE)\n area = regionprops(SAMPLE_3D)[0].area\n assert area == np.sum(SAMPLE_3D)\n\n\ndef test_bbox():\n bbox = regionprops(SAMPLE)[0].bbox\n assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]))\n\n SAMPLE_mod = SAMPLE.copy()\n SAMPLE_mod[:, -1] = 0\n bbox = regionprops(SAMPLE_mod)[0].bbox\n assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]-1))\n\n bbox = regionprops(SAMPLE_3D)[0].bbox\n assert_array_almost_equal(bbox, (1, 1, 1, 4, 3, 3))\n\n\ndef test_moments_central():\n mu = regionprops(SAMPLE)[0].moments_central\n # determined with OpenCV\n assert_almost_equal(mu[0,2], 436.00000000000045)\n # different from OpenCV results, bug in OpenCV\n assert_almost_equal(mu[0,3], -737.333333333333)\n assert_almost_equal(mu[1,1], -87.33333333333303)\n assert_almost_equal(mu[1,2], -127.5555555555593)\n assert_almost_equal(mu[2,0], 1259.7777777777774)\n assert_almost_equal(mu[2,1], 2000.296296296291)\n assert_almost_equal(mu[3,0], -760.0246913580195)\n\n\ndef test_centroid():\n centroid = regionprops(SAMPLE)[0].centroid\n # determined with MATLAB\n assert_array_almost_equal(centroid, (5.66666666666666, 9.444444444444444))\n\n\ndef test_convex_area():\n area = regionprops(SAMPLE)[0].convex_area\n # determined with MATLAB\n assert area == 124\n\n\ndef test_convex_image():\n img = regionprops(SAMPLE)[0].convex_image\n # determined with MATLAB\n ref = np.array(\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\n )\n assert_array_equal(img, ref)\n\n\ndef test_coordinates():\n sample = np.zeros((10, 10), dtype=np.int8)\n coords = np.array([[3, 2], [3, 3], [3, 4]])\n sample[coords[:, 0], coords[:, 1]] = 1\n prop_coords = regionprops(sample)[0].coords\n assert_array_equal(prop_coords, coords)\n\n sample = np.zeros((6, 6, 6), dtype=np.int8)\n coords = np.array([[1, 1, 1], [1, 2, 1], [1, 3, 1]])\n sample[coords[:, 0], coords[:, 1], coords[:, 2]] = 1\n prop_coords = regionprops(sample)[0].coords\n assert_array_equal(prop_coords, coords)\n\ndef test_eccentricity():\n eps = regionprops(SAMPLE)[0].eccentricity\n assert_almost_equal(eps, 0.814629313427)\n\n img = np.zeros((5, 5), dtype=np.int)\n img[2, 2] = 1\n eps = regionprops(img)[0].eccentricity\n assert_almost_equal(eps, 0)\n\n\ndef test_equiv_diameter():\n diameter = regionprops(SAMPLE)[0].equivalent_diameter\n # determined with MATLAB\n assert_almost_equal(diameter, 9.57461472963)\n\n\ndef test_euler_number():\n en = regionprops(SAMPLE)[0].euler_number\n assert en == 1\n\n SAMPLE_mod = SAMPLE.copy()\n SAMPLE_mod[7, -3] = 0\n en = regionprops(SAMPLE_mod)[0].euler_number\n assert en == 0\n\n\ndef test_extent():\n extent = regionprops(SAMPLE)[0].extent\n assert_almost_equal(extent, 0.4)\n\n\ndef test_moments_hu():\n hu = regionprops(SAMPLE)[0].moments_hu\n ref = np.array([\n 3.27117627e-01,\n 2.63869194e-02,\n 2.35390060e-02,\n 1.23151193e-03,\n 1.38882330e-06,\n -2.72586158e-05,\n 6.48350653e-06\n ])\n # bug in OpenCV caused in Central Moments calculation?\n assert_array_almost_equal(hu, ref)\n\n\ndef test_image():\n img = regionprops(SAMPLE)[0].image\n assert_array_equal(img, SAMPLE)\n\n img = regionprops(SAMPLE_3D)[0].image\n assert_array_equal(img, SAMPLE_3D[1:4, 1:3, 1:3])\n\n\ndef test_label():\n label = regionprops(SAMPLE)[0].label\n assert_array_equal(label, 1)\n\n label = regionprops(SAMPLE_3D)[0].label\n assert_array_equal(label, 1)\n\n\ndef test_filled_area():\n area = regionprops(SAMPLE)[0].filled_area\n assert area == np.sum(SAMPLE)\n\n SAMPLE_mod = SAMPLE.copy()\n SAMPLE_mod[7, -3] = 0\n area = regionprops(SAMPLE_mod)[0].filled_area\n assert area == np.sum(SAMPLE)\n\n\ndef test_filled_image():\n img = regionprops(SAMPLE)[0].filled_image\n assert_array_equal(img, SAMPLE)\n\n\ndef test_major_axis_length():\n length = regionprops(SAMPLE)[0].major_axis_length\n # MATLAB has different interpretation of ellipse than found in literature,\n # here implemented as found in literature\n assert_almost_equal(length, 16.7924234999)\n\n\ndef test_max_intensity():\n intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].max_intensity\n assert_almost_equal(intensity, 2)\n\n\ndef test_mean_intensity():\n intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].mean_intensity\n assert_almost_equal(intensity, 1.02777777777777)\n\n\ndef test_min_intensity():\n intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].min_intensity\n assert_almost_equal(intensity, 1)\n\n\ndef test_minor_axis_length():\n length = regionprops(SAMPLE)[0].minor_axis_length\n # MATLAB has different interpretation of ellipse than found in literature,\n # here implemented as found in literature\n assert_almost_equal(length, 9.739302807263)\n\n\ndef test_moments():\n m = regionprops(SAMPLE)[0].moments\n # determined with OpenCV\n 
assert_almost_equal(m[0,0], 72.0)\n assert_almost_equal(m[0,1], 408.0)\n assert_almost_equal(m[0,2], 2748.0)\n assert_almost_equal(m[0,3], 19776.0)\n assert_almost_equal(m[1,0], 680.0)\n assert_almost_equal(m[1,1], 3766.0)\n assert_almost_equal(m[1,2], 24836.0)\n assert_almost_equal(m[2,0], 7682.0)\n assert_almost_equal(m[2,1], 43882.0)\n assert_almost_equal(m[3,0], 95588.0)\n\n\ndef test_moments_normalized():\n nu = regionprops(SAMPLE)[0].moments_normalized\n # determined with OpenCV\n assert_almost_equal(nu[0,2], 0.08410493827160502)\n assert_almost_equal(nu[1,1], -0.016846707818929982)\n assert_almost_equal(nu[1,2], -0.002899800614433943)\n assert_almost_equal(nu[2,0], 0.24301268861454037)\n assert_almost_equal(nu[2,1], 0.045473992910668816)\n assert_almost_equal(nu[3,0], -0.017278118992041805)\n\n\ndef test_orientation():\n orientation = regionprops(SAMPLE)[0].orientation\n # determined with MATLAB\n assert_almost_equal(orientation, 0.10446844651921)\n # test correct quadrant determination\n orientation2 = regionprops(SAMPLE.T)[0].orientation\n assert_almost_equal(orientation2, math.pi / 2 - orientation)\n # test diagonal regions\n diag = np.eye(10, dtype=int)\n orientation_diag = regionprops(diag)[0].orientation\n assert_almost_equal(orientation_diag, -math.pi / 4)\n orientation_diag = regionprops(np.flipud(diag))[0].orientation\n assert_almost_equal(orientation_diag, math.pi / 4)\n orientation_diag = regionprops(np.fliplr(diag))[0].orientation\n assert_almost_equal(orientation_diag, math.pi / 4)\n orientation_diag = regionprops(np.fliplr(np.flipud(diag)))[0].orientation\n assert_almost_equal(orientation_diag, -math.pi / 4)\n\n\ndef test_perimeter():\n per = regionprops(SAMPLE)[0].perimeter\n assert_almost_equal(per, 55.2487373415)\n\n per = perimeter(SAMPLE.astype('double'), neighbourhood=8)\n assert_almost_equal(per, 46.8284271247)\n\n\ndef test_solidity():\n solidity = regionprops(SAMPLE)[0].solidity\n # determined with MATLAB\n assert_almost_equal(solidity, 0.580645161290323)\n\n\ndef test_weighted_moments_central():\n wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_moments_central\n ref = np.array(\n [[ 7.4000000000e+01, -2.1316282073e-13, 4.7837837838e+02,\n -7.5943608473e+02],\n [ 3.7303493627e-14, -8.7837837838e+01, -1.4801314828e+02,\n -1.2714707125e+03],\n [ 1.2602837838e+03, 2.1571526662e+03, 6.6989799420e+03,\n 1.5304076361e+04],\n [ -7.6561796932e+02, -4.2385971907e+03, -9.9501164076e+03,\n -3.3156729271e+04]]\n )\n np.set_printoptions(precision=10)\n assert_array_almost_equal(wmu, ref)\n\n\ndef test_weighted_centroid():\n centroid = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_centroid\n assert_array_almost_equal(centroid, (5.540540540540, 9.445945945945))\n\n\ndef test_weighted_moments_hu():\n whu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_moments_hu\n ref = np.array([\n 3.1750587329e-01,\n 2.1417517159e-02,\n 2.3609322038e-02,\n 1.2565683360e-03,\n 8.3014209421e-07,\n -3.5073773473e-05,\n 6.7936409056e-06\n ])\n assert_array_almost_equal(whu, ref)\n\n\ndef test_weighted_moments():\n wm = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_moments\n ref = np.array(\n [[ 7.4000000000e+01, 4.1000000000e+02, 2.7500000000e+03,\n 1.9778000000e+04],\n [ 6.9900000000e+02, 3.7850000000e+03, 2.4855000000e+04,\n 1.7500100000e+05],\n [ 7.8630000000e+03, 4.4063000000e+04, 2.9347700000e+05,\n 2.0810510000e+06],\n [ 9.7317000000e+04, 5.7256700000e+05, 3.9007170000e+06,\n 
2.8078871000e+07]]\n )\n assert_array_almost_equal(wm, ref)\n\n\ndef test_weighted_moments_normalized():\n wnu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE\n )[0].weighted_moments_normalized\n ref = np.array(\n [[ np.nan, np.nan, 0.0873590903, -0.0161217406],\n [ np.nan, -0.0160405109, -0.0031421072, -0.0031376984],\n [ 0.230146783, 0.0457932622, 0.0165315478, 0.0043903193],\n [-0.0162529732, -0.0104598869, -0.0028544152, -0.0011057191]]\n )\n assert_array_almost_equal(wnu, ref)\n\n\ndef test_label_sequence():\n a = np.empty((2, 2), dtype=np.int)\n a[:, :] = 2\n ps = regionprops(a)\n assert len(ps) == 1\n assert ps[0].label == 2\n\n\ndef test_pure_background():\n a = np.zeros((2, 2), dtype=np.int)\n ps = regionprops(a)\n assert len(ps) == 0\n\n\ndef test_invalid():\n ps = regionprops(SAMPLE)\n\n def get_intensity_image():\n ps[0].intensity_image\n\n assert_raises(AttributeError, get_intensity_image)\n\n\ndef test_invalid_size():\n wrong_intensity_sample = np.array([[1], [1]])\n assert_raises(ValueError, regionprops, SAMPLE, wrong_intensity_sample)\n\n\ndef test_equals():\n arr = np.zeros((100, 100), dtype=np.int)\n arr[0:25, 0:25] = 1\n arr[50:99, 50:99] = 2\n\n regions = regionprops(arr)\n r1 = regions[0]\n\n regions = regionprops(arr)\n r2 = regions[0]\n r3 = regions[1]\n\n assert_equal(r1 == r2, True, \"Same regionprops are not equal\")\n assert_equal(r1 != r3, True, \"Different regionprops are equal\")\n\n\ndef test_iterate_all_props():\n region = regionprops(SAMPLE)[0]\n p0 = dict((p, region[p]) for p in region)\n\n region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[0]\n p1 = dict((p, region[p]) for p in region)\n\n assert len(p0) < len(p1)\n\n\ndef test_cache():\n region = regionprops(SAMPLE)[0]\n f0 = region.filled_image\n region._label_image[:10] = 1\n f1 = region.filled_image\n\n # Changed underlying image, but cache keeps result the same\n assert_array_equal(f0, f1)\n\n # Now invalidate cache\n region._cache_active = False\n f1 = region.filled_image\n\n assert np.any(f0 != f1)\n\n\ndef test_docstrings_and_props():\n region = regionprops(SAMPLE)[0]\n\n docs = _parse_docs()\n props = [m for m in dir(region) if not m.startswith('_')]\n\n nr_docs_parsed = len(docs)\n nr_props = len(props)\n assert_equal(nr_docs_parsed, nr_props)\n\n ds = docs['weighted_moments_normalized']\n assert 'iteration' not in ds\n assert len(ds.split('\\n')) > 3\n\n\nif __name__ == \"__main__\":\n from numpy.testing import run_module_suite\n run_module_suite()\n" ]
[ [ "scipy.interpolate.interp1d", "numpy.argsort", "numpy.asarray", "numpy.abs", "numpy.cos", "numpy.linspace", "numpy.deg2rad", "scipy.fftpack.ifft", "numpy.ceil", "numpy.zeros", "numpy.searchsorted", "numpy.arange", "numpy.all", "numpy.array", "numpy.log2", "numpy.interp", "scipy.fftpack.fft", "scipy.fftpack.fftfreq", "numpy.clip", "numpy.sqrt", "numpy.sin" ], [ "numpy.testing.assert_almost_equal", "numpy.eye", "numpy.testing.assert_raises", "numpy.sum", "numpy.empty", "numpy.zeros", "numpy.flipud", "numpy.testing.assert_equal", "numpy.testing.run_module_suite", "numpy.any", "numpy.set_printoptions", "numpy.testing.assert_array_equal", "numpy.fliplr", "numpy.testing.assert_array_almost_equal", "numpy.array" ] ]
chrisjsewell/aiida-performance
[ "160606f07fe092a9e2bacdf62bfecec460fac642" ]
[ "tests/db_stats.py" ]
[ "\"\"\"Useful queries for profiling PostgreSQL databases\n\nThese queries are mainly adapted from\nhttps://gist.github.com/anvk/475c22cbca1edc5ce94546c871460fdd\n\"\"\"\nfrom functools import wraps\nfrom pathlib import Path\n\n\ndef execute_raw(raw):\n from aiida.manage.manager import get_manager\n\n backend = get_manager()._load_backend(schema_check=False)\n return backend.execute_raw(raw)\n\n\n# ------------------\n# -- Memory Size --\n# ------------------\n\n\ndef memory_db_df():\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n datname,\n pg_database_size(datname)\n from pg_database\n order by pg_database_size(datname);\n \"\"\"\n )\n df = pd.DataFrame(result, columns=[\"database\", \"size_mb\"])\n df[\"size_mb\"] = df[\"size_mb\"] * 1e-6\n return df\n\n\ndef memory_pg_classes_df():\n \"\"\"Return size of `pg_class`'s\n\n `pg_class` catalogs tables and most everything else that has columns,\n or is otherwise similar to a table.\n See https://www.postgresql.org/docs/9.3/catalog-pg-class.html\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n sum(pg_relation_size(pg_class.oid))::bigint,\n nspname,\n CASE pg_class.relkind\n WHEN 'r' THEN 'table'\n WHEN 'i' THEN 'index'\n WHEN 'S' THEN 'sequence'\n WHEN 'v' THEN 'view'\n WHEN 't' THEN 'toast'\n ELSE pg_class.relkind::text\n END\n FROM pg_class\n LEFT OUTER JOIN pg_namespace ON (pg_namespace.oid = pg_class.relnamespace)\n GROUP BY pg_class.relkind, nspname\n ORDER BY sum(pg_relation_size(pg_class.oid)) DESC;\n \"\"\"\n )\n df = pd.DataFrame(result, columns=[\"size_mb\", \"namespace\", \"relkind\"])\n df.sort_index(axis=1, inplace=True)\n df[\"size_mb\"] = df.size_mb * 1e-6\n return df\n\n\ndef memory_tables_df():\n \"\"\"Return statistics on indices.\n\n See https://www.postgresql.org/docs/current/monitoring-stats.html\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n select\n relname,\n pg_relation_size(relname::regclass) as table_size,\n pg_total_relation_size(relname::regclass) - pg_relation_size(relname::regclass) as index_size,\n pg_total_relation_size(relname::regclass) as total_size\n from pg_stat_user_tables\n \"\"\"\n )\n df = pd.DataFrame(result, columns=[\"name\", \"table_mb\", \"indices_mb\", \"total_mb\"])\n df.set_index(\"name\", inplace=True)\n df = df * 1e-6\n df.sort_values(\"total_mb\", ascending=False, inplace=True)\n return df\n\n\n# -------------\n# -- Indices --\n# -------------\n\n\ndef indices_list_df():\n \"\"\"Return list of indices by table and columns.\"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n select\n t.relname as table_name,\n i.relname as index_name,\n string_agg(a.attname, ',') as column_name\n from\n pg_class t,\n pg_class i,\n pg_index ix,\n pg_attribute a\n where\n t.oid = ix.indrelid\n and i.oid = ix.indexrelid\n and a.attrelid = t.oid\n and a.attnum = ANY(ix.indkey)\n and t.relkind = 'r'\n and t.relname not like 'pg_%'\n group by\n t.relname,\n i.relname\n order by\n t.relname,\n i.relname;\n \"\"\"\n )\n df = pd.DataFrame(result, columns=[\"table\", \"index\", \"columns\"])\n df.set_index([\"table\", \"columns\"], inplace=True)\n return df\n\n\ndef indices_stats_df(sort_size=False, with_sql=False):\n \"\"\"Return statistics on indices.\n\n See https://www.postgresql.org/docs/current/monitoring-stats.html\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n pt.tablename AS TableName,\n t.indexname AS IndexName,\n pc.reltuples AS TotalRows,\n pg_relation_size(quote_ident(pt.tablename)::text) AS 
TableSize,\n pg_relation_size(quote_ident(t.indexrelname)::text) AS IndexSize,\n t.idx_scan AS TotalNumberOfScan,\n t.idx_tup_read AS TotalTupleRead,\n t.idx_tup_fetch AS TotalTupleFetched,\n pgi.indexdef AS IndexDef\n FROM pg_tables AS pt\n LEFT OUTER JOIN pg_class AS pc\n ON pt.tablename=pc.relname\n LEFT OUTER JOIN\n (\n SELECT\n pc.relname AS TableName,\n pc2.relname AS IndexName,\n psai.idx_scan,\n psai.idx_tup_read,\n psai.idx_tup_fetch,\n psai.indexrelname\n FROM\n pg_index AS pi\n JOIN pg_class AS pc\n ON pc.oid = pi.indrelid\n JOIN pg_class AS pc2\n ON pc2.oid = pi.indexrelid\n JOIN pg_stat_all_indexes AS psai\n ON pi.indexrelid = psai.indexrelid\n ) AS T\n ON pt.tablename = T.TableName\n LEFT OUTER JOIN pg_indexes as pgi\n ON T.indexname = pgi.indexname\n WHERE pt.schemaname='public'\n ORDER BY 1;\n \"\"\"\n )\n columns = [\n \"table\",\n \"index\",\n \"rows\",\n \"table_size_mb\",\n \"index_size_mb\",\n # Number of index scans initiated on this index\n \"scans\",\n # Number of index entries returned by scans on this index\n \"read\",\n # Number of live rows fetched by index scans\n \"fetched\",\n \"sql\",\n ]\n df = pd.DataFrame(result, columns=columns)\n df.set_index([\"table\", \"index\"], inplace=True)\n df[\"table_size_mb\"] = df.table_size_mb * 10e-6\n df[\"index_size_mb\"] = df.index_size_mb * 10e-6\n if not with_sql:\n df.drop(\"sql\", axis=1, inplace=True)\n if sort_size:\n df.sort_values(\"index_size_mb\", ascending=False, inplace=True)\n else:\n df.sort_index(axis=0, inplace=True)\n return df\n\n\ndef indices_check_df(min_size_mb=0.1):\n \"\"\"Check for tables that may require an index.\"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n relname,\n seq_scan,\n idx_scan,\n pg_relation_size(relname::regclass) AS rel_size,\n n_live_tup\n FROM pg_stat_all_tables\n WHERE schemaname='public' AND pg_relation_size(relname::regclass)>{min_size};\n \"\"\".format(\n min_size=int(min_size_mb * 1e6)\n )\n )\n df = pd.DataFrame(\n result,\n columns=[\n \"table\",\n # Number of sequential scans initiated on this table\n \"seq_scans\",\n # Number of index scans initiated on this table\n \"idx_scans\",\n \"size_mb\",\n \"live_rows\",\n ],\n )\n df[\"idx_usage\"] = 100 * df.idx_scans / (df.seq_scans + df.idx_scans)\n df[\"idx_required\"] = (df.seq_scans - df.idx_scans) > 0\n df[\"size_mb\"] = df[\"size_mb\"] * 1e-6\n df.set_index(\"table\", inplace=True)\n return df\n\n\n# --------------------\n# -- Data Integrity --\n# --------------------\n\n\ndef cache_hit_ratio():\n \"\"\"Ideally hit_ration should be > 90%\"\"\"\n result = execute_raw(\n r\"\"\"\n SELECT\n sum(blks_hit)*100/sum(blks_hit+blks_read) as hit_ratio\n from pg_stat_database;\n \"\"\"\n )\n return float(result[0][0])\n\n\ndef anomalies_df():\n \"\"\"\n - c_commit_ratio should be > 95%\n - c_rollback_ratio should be < 5%\n - deadlocks should be close to 0\n - conflicts should be close to 0\n - temp_files and temp_bytes watch out for them\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n datname,\n (xact_commit*100)/nullif(xact_commit+xact_rollback,0) as c_commit_ratio,\n (xact_rollback*100)/nullif(xact_commit+xact_rollback, 0) as c_rollback_ratio,\n deadlocks,\n conflicts,\n temp_files,\n temp_bytes\n FROM pg_stat_database;\n \"\"\"\n )\n df = pd.DataFrame(\n result,\n columns=[\n \"database\",\n \"commit_ratio\",\n \"rollback_ratio\",\n \"deadlocks\",\n \"conflicts\",\n \"temp_files\",\n \"temp_size_mb\",\n ],\n )\n df[\"temp_size_mb\"] = df[\"temp_size_mb\"] * 1e-6\n return 
df\n\n\ndef write_activity_df(limit=50):\n \"\"\"\n hot_rate = rows HOT updated / total rows updated\n (Heap Only Tuple means with no separate index update required)\n\n Heap Only Tuple (HOT) means, creating a new update tuple if possible on the same page as the old tuple.\n Ideally hot_rate should be close to 100.\n You might be blocking HOT updates with indexes on updated columns. If those are expendable, you might get better overall performance without them.\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n s.relname,\n pg_relation_size(relid),\n coalesce(n_tup_ins,0) + 2 * coalesce(n_tup_upd,0) -\n coalesce(n_tup_hot_upd,0) + coalesce(n_tup_del,0) AS total_writes,\n (coalesce(n_tup_hot_upd,0)::float * 100 / (case when n_tup_upd > 0 then n_tup_upd else 1 end)::float) AS hot_rate\n /* This returns None\n (SELECT v[1] FROM regexp_matches(reloptions::text,E'fillfactor=(d+)') as r(v) limit 1) AS fillfactor\n */\n from pg_stat_all_tables\n s join pg_class c ON c.oid=relid\n order by total_writes desc\n limit {limit};\n \"\"\".format(\n limit=limit\n )\n )\n columns = [\n \"table\",\n \"size_mb\",\n \"writes\",\n \"hot_rate\",\n # \"fill_factor\"\n ]\n df = pd.DataFrame(result, columns=columns)\n df[\"size_mb\"] = df[\"size_mb\"] * 1e-6\n df.set_index(\"table\", inplace=True)\n return df\n\n\n# How many indexes are in cache\ndef cached_indices():\n result = execute_raw(\n r\"\"\"\n SELECT\n sum(idx_blks_read) as idx_read,\n sum(idx_blks_hit) as idx_hit,\n (sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit) as ratio\n FROM pg_statio_user_indexes;\n \"\"\"\n )\n return cached_indices\n\n\ndef dirty_pages():\n \"\"\"maxwritten_clean and buffers_backend_fsyn should be 0\"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT buffers_clean, maxwritten_clean, buffers_backend_fsync from pg_stat_bgwriter;\n \"\"\"\n )\n return pd.Series(\n dict(\n zip(\n (\"buffers_clean\", \"maxwritten_clean\", \"buffers_backend_fsync\"),\n result[0],\n )\n )\n )\n\n\n# -------------\n# -- Queries --\n# -------------\n\n\ndef requires_pg_stat(func):\n @wraps(func)\n def wrapper(*args, **kwds):\n try:\n return func(*args, **kwds)\n except Exception as err:\n if 'relation \"pg_stat_statements\" does not exist' in str(err):\n raise RuntimeError(\n \"This function requires that the pg_stat_statements extension is initialised on your database\"\n )\n raise\n\n return wrapper\n\n\n@requires_pg_stat\ndef query_reset_stats():\n return execute_raw(\"select pg_stat_statements_reset();\")\n\n\n@requires_pg_stat\ndef query_stats_df(limit=100):\n \"\"\"Return most CPU intensive queries\n\n See: https://www.postgresql.org/docs/9.4/pgstatstatements.html\n \"\"\"\n import pandas as pd\n result = execute_raw(\n r\"\"\"\n SELECT\n query,\n round(total_time::numeric, 2) AS total_time,\n calls,\n rows,\n round((100 * total_time / sum(total_time::numeric) OVER ())::numeric, 2) AS percentage_cpu\n FROM pg_stat_statements\n ORDER BY total_time DESC\n LIMIT {limit};\n \"\"\".format(\n limit=limit\n )\n )\n # avg_time = total_time / calls\n df = pd.DataFrame(\n result, columns=[\"sql\", \"time_seconds\", \"calls\", \"rows\", \"cpu_percent\"]\n )\n df[\"time_seconds\"] = df[\"time_seconds\"].astype(float) * 1e-6\n df[\"type\"] = df.sql.apply(lambda s: s.split()[0].upper())\n return df\n\n\n@requires_pg_stat\ndef query_write_df():\n \"\"\"Return most writing (to shared_buffers) queries\n\n See: https://www.postgresql.org/docs/9.4/pgstatstatements.html\n \"\"\"\n import pandas as pd\n result = 
execute_raw(\n r\"\"\"\n SELECT\n query,\n shared_blks_dirtied\n from pg_stat_statements\n where shared_blks_dirtied > 0\n order by 2 desc;\n \"\"\"\n )\n return pd.DataFrame(result, columns=[\"sql\", \"blocks_written\"])\n\n\nif __name__ == \"__main__\":\n import argparse, os\n parser = argparse.ArgumentParser()\n parser.add_argument(\"commands\", choices=[\"queries\", \"indices\", \"reset\"], nargs='+')\n parser.add_argument(\"-n\", \"--name\", default=\"test\")\n parser.add_argument(\"-p\", \"--path\", default=os.getcwd())\n args = parser.parse_args()\n\n for _command in args.commands:\n if _command == \"queries\":\n Path(args.path).joinpath(args.name + \"_queries.html\").write_text(query_stats_df().to_html())\n if _command == \"indices\":\n Path(args.path).joinpath(args.name + \"_indices.html\").write_text(indices_stats_df().to_html())\n elif _command == \"reset\":\n query_reset_stats()\n" ]
[ [ "pandas.DataFrame" ] ]
mnicstruwig/optbinning
[ "6ce991e1ca75b4d41835f3b3bf8e0f294f6ba780" ]
[ "optbinning/binning/piecewise/continuous_binning.py" ]
[ "\"\"\"\nOptimal piecewise binning for continuous target.\n\"\"\"\n\n# Guillermo Navas-Palencia <[email protected]>\n# Copyright (C) 2020\n\nimport time\n\nimport numpy as np\n\nfrom .base import _check_parameters\nfrom .base import BasePWBinning\nfrom .binning_statistics import PWContinuousBinningTable\nfrom .metrics import continuous_metrics\nfrom .transformations import transform_continuous_target\n\n\nclass ContinuousOptimalPWBinning(BasePWBinning):\n \"\"\"Optimal Piecewise binning of a numerical variable with respect to a\n binary target.\n\n Parameters\n ----------\n name : str, optional (default=\"\")\n The variable name.\n\n objective : str, optional (default=\"l2\")\n The objective function. Supported objectives are \"l2\", \"l1\", \"huber\"\n and \"quantile\". Note that \"l1\", \"huber\" and \"quantile\" are robust\n objective functions.\n\n degree : int (default=1)\n The degree of the polynomials.\n\n * degree = 0: piecewise constant functions.\n * degree = 1: piecewise linear functions.\n * degree > 1: piecewise polynomial functions.\n\n continuous : bool (default=True)\n Whether to fit a continuous or discontinuous piecewise regression.\n\n prebinning_method : str, optional (default=\"cart\")\n The pre-binning method. Supported methods are \"cart\" for a CART\n decision tree, \"quantile\" to generate prebins with approximately same\n frequency and \"uniform\" to generate prebins with equal width. Method\n \"cart\" uses `sklearn.tree.DecistionTreeClassifier\n <https://scikit-learn.org/stable/modules/generated/sklearn.tree.\n DecisionTreeClassifier.html>`_.\n\n max_n_prebins : int (default=20)\n The maximum number of bins after pre-binning (prebins).\n\n min_prebin_size : float (default=0.05)\n The fraction of mininum number of records for each prebin.\n\n min_n_bins : int or None, optional (default=None)\n The minimum number of bins. If None, then ``min_n_bins`` is\n a value in ``[0, max_n_prebins]``.\n\n max_n_bins : int or None, optional (default=None)\n The maximum number of bins. If None, then ``max_n_bins`` is\n a value in ``[0, max_n_prebins]``.\n\n min_bin_size : float or None, optional (default=None)\n The fraction of minimum number of records for each bin. If None,\n ``min_bin_size = min_prebin_size``.\n\n max_bin_size : float or None, optional (default=None)\n The fraction of maximum number of records for each bin. If None,\n ``max_bin_size = 1.0``.\n\n monotonic_trend : str or None, optional (default=\"auto\")\n The monotonic trend. Supported trends are “auto”, \"auto_heuristic\" and\n \"auto_asc_desc\" to automatically determine the trend maximizing IV\n using a machine learning classifier, \"ascending\", \"descending\",\n \"concave\", \"convex\", \"peak\" and \"peak_heuristic\" to allow a peak change\n point, and \"valley\" and \"valley_heuristic\" to allow a valley change\n point. Trends \"auto_heuristic\", \"peak_heuristic\" and \"valley_heuristic\"\n use a heuristic to determine the change point, and are significantly\n faster for large size instances (``max_n_prebins > 20``). Trend\n \"auto_asc_desc\" is used to automatically select the best monotonic\n trend between \"ascending\" and \"descending\". If None, then the\n monotonic constraint is disabled.\n\n n_subsamples : int or None (default=None)\n Number of subsamples to fit the piecewise regression algorithm. If\n None, all values are considered.\n\n max_pvalue : float or None, optional (default=0.05)\n The maximum p-value among bins. 
The Z-test is used to detect bins\n not satisfying the p-value constraint. Option supported by solvers\n \"cp\" and \"mip\".\n\n max_pvalue_policy : str, optional (default=\"consecutive\")\n The method to determine bins not satisfying the p-value constraint.\n Supported methods are \"consecutive\" to compare consecutive bins and\n \"all\" to compare all bins.\n\n outlier_detector : str or None, optional (default=None)\n The outlier detection method. Supported methods are \"range\" to use\n the interquartile range based method or \"zcore\" to use the modified\n Z-score method.\n\n outlier_params : dict or None, optional (default=None)\n Dictionary of parameters to pass to the outlier detection method.\n\n user_splits : array-like or None, optional (default=None)\n The list of pre-binning split points when ``dtype`` is \"numerical\" or\n the list of prebins when ``dtype`` is \"categorical\".\n\n user_splits_fixed : array-like or None (default=None)\n The list of pre-binning split points that must be fixed.\n\n special_codes : array-like or None, optional (default=None)\n List of special codes. Use special codes to specify the data values\n that must be treated separately.\n\n split_digits : int or None, optional (default=None)\n The significant digits of the split points. If ``split_digits`` is set\n to 0, the split points are integers. If None, then all significant\n digits in the split points are considered.\n\n solver : str, optional (default=\"auto\")\n The optimizer to solve the underlying mathematical optimization\n problem. Supported solvers are `\"ecos\"\n <https://github.com/embotech/ecos>`_, `\"osqp\"\n <https://github.com/oxfordcontrol/osqp>`_, \"direct\", to choose the\n direct solver, and \"auto\", to choose the most appropriate solver for\n the problem.\n\n h_epsilon: float (default=1.35)\n The parameter h_epsilon used when ``objective=\"huber\"``, controls the\n number of samples that should be classified as outliers.\n\n quantile : float (default=0.5)\n The parameter quantile is the q-th quantile to be used when\n ``objective=\"quantile\"``.\n\n regularization: str or None (default=None)\n Type of regularization. Supported regularization are \"l1\" (Lasso) and\n \"l2\" (Ridge). If None, no regularization is applied.\n\n reg_l1 : float (default=1.0)\n L1 regularization term. Increasing this value will smooth the\n regression model. Only applicable if ``regularization=\"l1\"``.\n\n reg_l2 : float (default=1.0)\n L2 regularization term. Increasing this value will smooth the\n regression model. 
Only applicable if ``regularization=\"l2\"``.\n\n random_state : int, RandomState instance or None, (default=None)\n If ``n_subsamples < n_samples``, controls the shuffling applied to the\n data before applying the split.\n\n verbose : bool (default=False)\n Enable verbose output.\n \"\"\"\n def __init__(self, name=\"\", objective=\"l2\", degree=1,\n continuous=True, prebinning_method=\"cart\", max_n_prebins=20,\n min_prebin_size=0.05, min_n_bins=None, max_n_bins=None,\n min_bin_size=None, max_bin_size=None, monotonic_trend=\"auto\",\n n_subsamples=None, max_pvalue=None,\n max_pvalue_policy=\"consecutive\", outlier_detector=None,\n outlier_params=None, user_splits=None, user_splits_fixed=None,\n special_codes=None, split_digits=None, solver=\"auto\",\n h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0,\n reg_l2=1.0, random_state=None, verbose=False):\n\n super().__init__(name, None, objective, degree, continuous,\n prebinning_method, max_n_prebins, min_prebin_size,\n min_n_bins, max_n_bins, min_bin_size, max_bin_size,\n monotonic_trend, n_subsamples, max_pvalue,\n max_pvalue_policy, outlier_detector, outlier_params,\n user_splits, user_splits_fixed, special_codes,\n split_digits, solver, h_epsilon, quantile,\n regularization, reg_l1, reg_l2, random_state, verbose)\n\n self._problem_type = \"regression\"\n\n self._n_records_missing = None\n self._n_records_special = None\n self._sum_special = None\n self._sum_missing = None\n self._std_special = None\n self._std_missing = None\n self._min_target_missing = None\n self._min_target_special = None\n self._max_target_missing = None\n self._max_target_special = None\n self._n_zeros_missing = None\n self._n_zeros_special = None\n\n def fit_transform(self, x, y, metric_special=0, metric_missing=0,\n lb=None, ub=None, check_input=False):\n \"\"\"Fit the optimal piecewise binning according to the given training\n data, then transform it.\n\n Parameters\n ----------\n x : array-like, shape = (n_samples,)\n Training vector, where n_samples is the number of samples.\n\n y : array-like, shape = (n_samples,)\n Target vector relative to x.\n\n metric_special : float or str (default=0)\n The metric value to transform special codes in the input vector.\n Supported metrics are \"empirical\" to use the empirical mean and any\n numerical value.\n\n metric_missing : float or str (default=0)\n The metric value to transform missing values in the input vector.\n Supported metrics are \"empirical\" to use the empirical mean and any\n numerical value.\n\n lb : float or None (default=None)\n Avoid values below the lower bound lb.\n\n ub : float or None (default=None)\n Avoid values above the upper bound ub.\n\n check_input : bool (default=False)\n Whether to check input arrays.\n\n Returns\n -------\n x_new : numpy array, shape = (n_samples,)\n Transformed array.\n \"\"\"\n return self.fit(x, y, check_input).transform(\n x, metric_special, metric_missing, lb, ub, check_input)\n\n def transform(self, x, metric_special=0, metric_missing=0,\n lb=None, ub=None, check_input=False):\n \"\"\"Transform given data using bins from the fitted optimal piecewise\n binning.\n\n Parameters\n ----------\n x : array-like, shape = (n_samples,)\n Training vector, where n_samples is the number of samples.\n\n metric_special : float or str (default=0)\n The metric value to transform special codes in the input vector.\n Supported metrics are \"empirical\" to use the empirical mean and any\n numerical value.\n\n metric_missing : float or str (default=0)\n The metric value to 
transform missing values in the input vector.\n Supported metrics are \"empirical\" to use the empirical mean and any\n numerical value.\n\n lb : float or None (default=None)\n Avoid values below the lower bound lb.\n\n ub : float or None (default=None)\n Avoid values above the upper bound ub.\n\n check_input : bool (default=False)\n Whether to check input arrays.\n\n Returns\n -------\n x_new : numpy array, shape = (n_samples,)\n Transformed array.\n \"\"\"\n self._check_is_fitted()\n\n return transform_continuous_target(\n self._optb.splits, x, self._c, lb, ub, self._n_records_special,\n self._sum_special, self._n_records_missing, self._sum_missing,\n self.special_codes, metric_special, metric_missing, check_input)\n\n def _fit(self, x, y, lb, ub, check_input):\n time_init = time.perf_counter()\n\n if self.verbose:\n self._logger.info(\"Optimal piecewise binning started.\")\n self._logger.info(\"Options: check parameters.\")\n\n _check_parameters(**self.get_params(deep=False), estimator=None,\n problem_type=self._problem_type)\n\n # Pre-processing\n if self.verbose:\n self._logger.info(\"Pre-processing started.\")\n\n self._n_samples = len(x)\n\n if self.verbose:\n self._logger.info(\"Pre-processing: number of samples: {}\"\n .format(self._n_samples))\n\n time_preprocessing = time.perf_counter()\n\n [x_clean, y_clean, x_missing, y_missing, x_special, y_special,\n _, _, _, _, _, _, _] = self._fit_preprocessing(x, y, check_input)\n\n self._time_preprocessing = time.perf_counter() - time_preprocessing\n\n if self.verbose:\n n_clean = len(x_clean)\n n_missing = len(x_missing)\n n_special = len(x_special)\n\n self._logger.info(\"Pre-processing: number of clean samples: {}\"\n .format(n_clean))\n\n self._logger.info(\"Pre-processing: number of missing samples: {}\"\n .format(n_missing))\n\n self._logger.info(\"Pre-processing: number of special samples: {}\"\n .format(n_special))\n\n if self.outlier_detector is not None:\n n_outlier = self._n_samples-(n_clean + n_missing + n_special)\n self._logger.info(\"Pre-processing: number of outlier samples: \"\n \"{}\".format(n_outlier))\n\n self._logger.info(\"Pre-processing terminated. Time: {:.4f}s\"\n .format(self._time_preprocessing))\n\n # Pre-binning\n self._time_estimator = 0\n\n # Fit optimal binning algorithm for continuous target. 
Use optimal\n # split points to compute optimal piecewise functions\n self._fit_binning(x_clean, y_clean, y_clean, lb, ub)\n\n # Post-processing\n if self.verbose:\n self._logger.info(\"Post-processing started.\")\n self._logger.info(\"Post-processing: compute binning information.\")\n\n time_postprocessing = time.perf_counter()\n\n # Compute n_records and sum for special and missing\n self._n_records_special = len(y_special)\n self._sum_special = np.sum(y_special)\n self._n_zeros_special = np.count_nonzero(y_special == 0)\n if len(y_special):\n self._std_special = np.std(y_special)\n self._min_target_special = np.min(y_special)\n self._max_target_special = np.max(y_special)\n\n self._n_records_missing = len(y_missing)\n self._sum_missing = np.sum(y_missing)\n self._n_zeros_missing = np.count_nonzero(y_missing == 0)\n if len(y_missing):\n self._std_missing = np.std(y_missing)\n self._min_target_missing = np.min(y_missing)\n self._max_target_missing = np.max(y_missing)\n\n bt = self._optb.binning_table.build(add_totals=False)\n n_records = bt[\"Count\"].values\n sums = bt[\"Sum\"].values\n stds = bt[\"Std\"].values\n min_target = bt[\"Min\"].values\n max_target = bt[\"Max\"].values\n n_zeros = bt[\"Zeros count\"].values\n\n n_records[self._n_bins] = self._n_records_special\n n_records[self._n_bins + 1] = self._n_records_missing\n sums[self._n_bins] = self._sum_special\n sums[self._n_bins + 1] = self._sum_missing\n stds[self._n_bins] = self._std_special\n stds[self._n_bins + 1] = self._std_missing\n min_target[self._n_bins] = self._min_target_special\n min_target[self._n_bins + 1] = self._min_target_missing\n max_target[self._n_bins] = self._max_target_special\n max_target[self._n_bins + 1] = self._max_target_missing\n n_zeros[self._n_bins] = self._n_zeros_special\n n_zeros[self._n_bins + 1] = self._n_zeros_missing\n\n # Compute metrics\n if self.verbose:\n self._logger.info(\"Post-processing: compute performance metrics.\")\n\n d_metrics = continuous_metrics(\n x_clean, y_clean, self._optb.splits, self._c, lb, ub,\n self._n_records_special, self._sum_special,\n self._n_records_missing, self._sum_missing, self.special_codes)\n\n # Binning table\n self._binning_table = PWContinuousBinningTable(\n self.name, self._optb.splits, self._c, n_records, sums, stds,\n min_target, max_target, n_zeros, lb, ub, x_clean.min(),\n x_clean.max(), d_metrics)\n\n self._time_postprocessing = time.perf_counter() - time_postprocessing\n\n if self.verbose:\n self._logger.info(\"Post-processing terminated. Time: {:.4f}s\"\n .format(self._time_postprocessing))\n\n self._time_total = time.perf_counter() - time_init\n\n if self.verbose:\n self._logger.info(\"Optimal piecewise binning terminated. \"\n \"Status: {}. Time: {:.4f}s\"\n .format(self._status, self._time_total))\n\n # Completed successfully\n self._class_logger.close()\n self._is_fitted = True\n\n return self\n" ]
[ [ "numpy.sum", "numpy.count_nonzero", "numpy.max", "numpy.min", "numpy.std" ] ]
mmwebster/DeepRL-Grounding
[ "aa7fa63fbc26e8b0fa3fe289a5fe5a00ef3e6278" ]
[ "a3c_train.py" ]
[ "import torch.optim as optim\nimport env as grounding_env\n\nfrom models import *\nfrom torch.autograd import Variable\n\nimport logging\n\n\ndef ensure_shared_grads(model, shared_model):\n for param, shared_param in zip(model.parameters(),\n shared_model.parameters()):\n if shared_param.grad is not None:\n return\n shared_param._grad = param.grad\n\n\ndef train(rank, args, shared_model):\n torch.manual_seed(args.seed + rank)\n\n env = grounding_env.GroundingEnv(args)\n env.game_init()\n\n model = A3C_LSTM_GA(args)\n\n if (args.load != \"0\"):\n print(str(rank) + \" Loading model ... \"+args.load)\n model.load_state_dict(\n torch.load(args.load, map_location=lambda storage, loc: storage))\n\n model.train()\n\n optimizer = optim.SGD(shared_model.parameters(), lr=args.lr)\n\n p_losses = []\n v_losses = []\n\n (image, instruction), _, _, _ = env.reset()\n instruction_idx = []\n for word in instruction.split(\" \"):\n instruction_idx.append(env.word_to_idx[word])\n instruction_idx = np.array(instruction_idx)\n\n image = torch.from_numpy(image).float()/255.0\n instruction_idx = torch.from_numpy(instruction_idx).view(1, -1)\n\n done = True\n\n episode_length = 0\n num_iters = 0\n while True:\n # Sync with the shared model\n model.load_state_dict(shared_model.state_dict())\n if done:\n episode_length = 0\n cx = Variable(torch.zeros(1, 256))\n hx = Variable(torch.zeros(1, 256))\n\n else:\n cx = Variable(cx.data)\n hx = Variable(hx.data)\n\n values = []\n log_probs = []\n rewards = []\n entropies = []\n\n for step in range(args.num_steps):\n episode_length += 1\n tx = Variable(torch.from_numpy(np.array([episode_length])).long())\n\n value, logit, (hx, cx) = model((Variable(image.unsqueeze(0)),\n Variable(instruction_idx),\n (tx, hx, cx)))\n prob = F.softmax(logit)\n log_prob = F.log_softmax(logit)\n entropy = -(log_prob * prob).sum(1)\n entropies.append(entropy)\n\n action = prob.multinomial(num_samples=1).data\n log_prob = log_prob.gather(1, Variable(action))\n\n action = action.numpy()[0, 0]\n (image, _), reward, done, _ = env.step(action)\n\n done = done or episode_length >= args.max_episode_length\n\n if done:\n (image, instruction), _, _, _ = env.reset()\n instruction_idx = []\n for word in instruction.split(\" \"):\n instruction_idx.append(env.word_to_idx[word])\n instruction_idx = np.array(instruction_idx)\n instruction_idx = torch.from_numpy(\n instruction_idx).view(1, -1)\n\n image = torch.from_numpy(image).float()/255.0\n\n values.append(value)\n log_probs.append(log_prob)\n rewards.append(reward)\n\n if done:\n break\n\n R = torch.zeros(1, 1)\n if not done:\n tx = Variable(torch.from_numpy(np.array([episode_length])).long())\n value, _, _ = model((Variable(image.unsqueeze(0)),\n Variable(instruction_idx), (tx, hx, cx)))\n R = value.data\n\n values.append(Variable(R))\n policy_loss = 0\n value_loss = 0\n R = Variable(R)\n\n gae = torch.zeros(1, 1)\n for i in reversed(range(len(rewards))):\n R = args.gamma * R + rewards[i]\n advantage = R - values[i]\n value_loss = value_loss + 0.5 * advantage.pow(2)\n\n # Generalized Advantage Estimataion\n delta_t = rewards[i] + args.gamma * \\\n values[i + 1].data - values[i].data\n gae = gae * args.gamma * args.tau + delta_t\n\n policy_loss = policy_loss - \\\n log_probs[i] * Variable(gae) - 0.01 * entropies[i]\n\n optimizer.zero_grad()\n\n p_losses.append(policy_loss.data[0, 0])\n v_losses.append(value_loss.data[0, 0])\n\n if(len(p_losses) > 1000):\n num_iters += 1\n print(\" \".join([\n \"Training thread: {}\".format(rank),\n \"Num iters: 
{}K\".format(num_iters),\n \"Avg policy loss: {}\".format(np.mean(p_losses)),\n \"Avg value loss: {}\".format(np.mean(v_losses))]))\n logging.info(\" \".join([\n \"Training thread: {}\".format(rank),\n \"Num iters: {}K\".format(num_iters),\n \"Avg policy loss: {}\".format(np.mean(p_losses)),\n \"Avg value loss: {}\".format(np.mean(v_losses))]))\n p_losses = []\n v_losses = []\n\n (policy_loss + 0.5 * value_loss).backward()\n torch.nn.utils.clip_grad_norm(model.parameters(), 40)\n\n ensure_shared_grads(model, shared_model)\n optimizer.step()\n" ]
[ [ "torch.autograd.Variable" ] ]
mwojcik96/dtw-utterance-recognition
[ "9371393dfe92abb5b85c40828d099ceca599aa89" ]
[ "main.py" ]
[ "import glob\nimport struct\nimport wave\nfrom collections import Counter\nfrom operator import itemgetter\n\nimport librosa\nimport numpy as np\nfrom tslearn.metrics import dtw\n\n\ndef compute_mfcc_from_file(file):\n time_characteristic = create_time_characteristics_of_a_file(file)\n mfcc = librosa.feature.mfcc(y=time_characteristic, sr=16000, n_mfcc=13)\n return mfcc\n\n\ndef create_time_characteristics_of_a_file(file):\n wave_file = wave.open(file, 'r')\n # rate = wave_file.getframerate()\n length = wave_file.getnframes()\n time_plot = []\n for i in range(0, length):\n wave_data = wave_file.readframes(1)\n data = struct.unpack(\"<h\", wave_data)\n time_plot.append(int(data[0]))\n return np.array(time_plot, dtype=np.float32)\n\n\ndef compute_spectral_roloff(file):\n chars = create_time_characteristics_of_a_file(file)\n return librosa.feature.spectral_rolloff(chars, sr=16000)[0]\n\n\ndef calculate_dict(mfcc_values, rolloff_values, names, labels):\n final_dict = dict()\n for i in names:\n final_dict[i] = []\n for id1, (mf1, ro1, nm1, lb1) in enumerate(zip(mfcc_values, rolloff_values, names, labels)):\n for id2, (mf2, ro2, nm2, lb2) in enumerate(zip(mfcc_values, rolloff_values, names, labels)):\n if id1 < id2:\n current_dtw = dtw(mf1, mf2)\n # current_dtw = dtw(mf1 + ro1, mf2 + ro2)\n final_dict[nm1].append({\"name\": nm2, \"label\": lb2, \"distance\": current_dtw})\n final_dict[nm2].append({\"name\": nm1, \"label\": lb1, \"distance\": current_dtw})\n for final_key, final_item in final_dict.items():\n final_dict[final_key] = sorted(final_item, key=itemgetter('distance'))\n # print(key, len(final_dict[key]))\n return final_dict\n\n\ndef recognize_speech(vector, k=1):\n nearest_neighbours = Counter(elem[\"label\"] for elem in vector[:k])\n return nearest_neighbours.most_common(1)[0][0]\n\n\nif __name__ == '__main__':\n mfcc_list = []\n rolloff_list = []\n name_list = []\n label_list = []\n for wav_name in glob.glob(\"./*/*.WAV\"):\n mfcc_list.append(compute_mfcc_from_file(wav_name).T)\n rolloff_list.append(compute_spectral_roloff(wav_name))\n name_list.append(wav_name.split(\"/\")[-1])\n label_list.append(wav_name.split(\"/\")[-2])\n dist_dict = calculate_dict(mfcc_list, rolloff_list, name_list, label_list)\n for n in range(1, 11):\n accuracy = 0\n print(\"KNN for k =\", n)\n for key, item in dist_dict.items():\n real = label_list[name_list.index(key)]\n predicted = recognize_speech(item, n)\n # print(key, \"Real:\", real, \"Predicted:\", predicted)\n if real == predicted:\n accuracy += 1\n print(\"Accuracy:\", accuracy / len(name_list))\n" ]
[ [ "numpy.array" ] ]
mas-dse-ringhilt/DSE-American-Gut-Project
[ "dadb3be8d40d6fb325d26920b145c04c837a6869" ]
[ "american_gut_project_pipeline/pipeline/metrics.py" ]
[ "import pandas as pd\nfrom sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\n\n\ndef evaluate(clf, x_train, x_test, y_train, y_test, name, training_data_name, embedding, params=None):\n predictions = clf.predict(x_train)\n # train_tn, train_fp, train_fn, train_tp = confusion_matrix(y_train, predictions).ravel()\n train_accuracy = accuracy_score(y_train, predictions)\n # train_precision = precision_score(y_train, predictions)\n # train_recall = recall_score(y_train, predictions)\n train_f1_score = f1_score(y_train, predictions, average='weighted')\n\n predictions = clf.predict(x_test)\n # test_tn, test_fp, test_fn, test_tp = confusion_matrix(y_test, predictions).ravel()\n test_accuracy = accuracy_score(y_test, predictions)\n # test_precision = precision_score(y_test, predictions)\n # test_recall = recall_score(y_test, predictions)\n test_f1_score = f1_score(y_test, predictions, average='weighted')\n\n result_dict = {\n 'name': [name],\n 'embedding': [embedding],\n 'params': [params],\n 'training_data_name': [training_data_name],\n # 'train_true_negative': [train_tn],\n # 'train_false_positive': [train_fp],\n # 'train_false_negative': [train_fn],\n # 'train_true_positive': [train_tp],\n 'train_accuracy': [train_accuracy],\n # 'train_precision': [train_precision],\n # 'train_recall': [train_recall],\n 'train_f1_score': [train_f1_score],\n\n # 'test_true_negative': [test_tn],\n # 'test_false_positive': [test_fp],\n # 'test_false_negative': [test_fn],\n # 'test_true_positive': [test_tp],\n 'test_accuracy': [test_accuracy],\n # 'test_precision': [test_precision],\n # 'test_recall': [test_recall],\n 'test_f1_score': [test_f1_score],\n }\n\n return pd.DataFrame(result_dict)\n" ]
[ [ "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score", "pandas.DataFrame" ] ]
grandevelia/ProDy
[ "7c725640a94c16543423c0756388998cb86a97ae" ]
[ "prody/chromatin/hic.py" ]
[ "from numbers import Integral\n\nfrom numpy import ma\nimport numpy as np\nfrom scipy.sparse import coo_matrix\nfrom scipy.stats import mode\nfrom prody.chromatin.norm import VCnorm, SQRTVCnorm, Filenorm\nfrom prody.chromatin.functions import div0, showDomains, _getEigvecs\n\nfrom prody import PY2K\nfrom prody.dynamics import GNM, MaskedGNM\nfrom prody.dynamics.functions import writeArray\nfrom prody.dynamics.mode import Mode\nfrom prody.dynamics.modeset import ModeSet\n\nfrom prody.utilities import openFile, importLA, showMatrix, isURL, fixArraySize, makeSymmetric\n\n__all__ = ['HiC', 'parseHiC', 'parseHiCStream', 'parseHiCBinary', 'saveHiC', 'loadHiC', 'writeMap']\n\nclass HiC(object):\n\n \"\"\"This class is used to store and preprocess Hi-C contact map. A :class:`.GNM`\n instance for analyzing the contact map can be also created by using this class.\n \"\"\"\n\n def __init__(self, title='Unknown', map=None, bin=None):\n self._title = title\n self._map = None\n self.mask = False\n self._labels = 0\n self.masked = True\n self.bin = bin\n self.map = map\n \n @property\n def map(self):\n if self.masked:\n return self.getTrimedMap()\n else:\n return self._map\n\n @map.setter\n def map(self, value):\n if value is None: \n self._map = None\n else:\n self._map = np.asarray(value)\n self._map = makeSymmetric(self._map)\n self._maskUnmappedRegions()\n self._labels = np.zeros(len(self._map), dtype=int)\n\n def __repr__(self):\n mask = self.mask\n \n if np.isscalar(mask):\n return '<HiC: {0} ({1} loci)>'.format(self._title, len(self._map))\n else:\n return '<HiC: {0} ({1} mapped loci; {2} in total)>'.format(self._title, np.count_nonzero(mask), len(self._map))\n\n def __str__(self):\n\n return 'HiC ' + self._title\n\n def __getitem__(self, index):\n if isinstance(index, Integral):\n return self.map.flatten()[index]\n else:\n i, j = index\n return self.map[i,j]\n\n def __len__(self):\n mask = self.mask \n \n if np.isscalar(mask):\n return len(self._map)\n else:\n return np.count_nonzero(mask)\n \n def numAtoms(self):\n return len(self.map)\n\n def getTitle(self):\n \"\"\"Returns title of the instance.\"\"\"\n\n return self._title\n\n def setTitle(self, title):\n \"\"\"Sets title of the instance.\"\"\"\n\n self._title = str(title)\n\n def getCompleteMap(self):\n \"\"\"Obtains the complete contact map with unmapped regions.\"\"\"\n\n return self._map\n \n def getTrimedMap(self):\n \"\"\"Obtains the contact map without unmapped regions.\"\"\"\n\n if self._map is None: \n return None\n if np.isscalar(self.mask):\n return self._map\n\n M = ma.array(self._map)\n M.mask = np.diag(~self.mask)\n return ma.compress_rowcols(M)\n \n def align(self, array, axis=None):\n if not isinstance(array, np.ndarray):\n array = np.array(array)\n\n ret = array = array.copy()\n\n if np.isscalar(self.mask):\n return ret\n\n mask = self.mask.copy()\n\n l_full = self.getCompleteMap().shape[0]\n l_trim = self.getTrimedMap().shape[0]\n \n if len(array.shape) == 0:\n raise ValueError('array cannot be empty')\n elif len(array.shape) == 1:\n l = array.shape[0]\n if l == l_trim:\n N = len(mask)\n ret = np.zeros(N, dtype=array.dtype)\n ret[mask] = array\n elif l == l_full:\n ret = array[mask]\n else:\n raise ValueError('The length of array (%d) does not '\n 'match that of either the full (%d) '\n 'or trimed (%d).'\n %(l, l_full, l_trim))\n elif len(array.shape) == 2:\n s = array.shape\n\n if axis is None:\n if s[0] != s[1]:\n raise ValueError('The array must be a square matrix '\n 'if axis is set to None.')\n if s[0] == l_trim:\n N 
= len(mask)\n whole_mat = np.zeros((N,N), dtype=array.dtype)\n mask = np.outer(mask, mask)\n whole_mat[mask] = array.flatten()\n ret = whole_mat\n elif s[0] == l_full:\n M = ma.array(array)\n M.mask = np.diag(mask)\n ret = ma.compress_rowcols(M)\n else:\n raise ValueError('The size of array (%d) does not '\n 'match that of either the full (%d) '\n 'or trimed (%d).'\n %(s[0], l_full, l_trim))\n else:\n new_shape = list(s)\n otheraxis = 0 if axis!=0 else 1\n if s[axis] == l_trim:\n N = len(mask)\n new_shape[axis] = N\n whole_mat = np.zeros(new_shape)\n mask = np.expand_dims(mask, axis=otheraxis)\n mask = mask.repeat(s[otheraxis], axis=otheraxis)\n whole_mat[mask] = array.flatten()\n ret = whole_mat\n elif s[axis] == l_full:\n mask = np.expand_dims(mask, axis=otheraxis)\n mask = mask.repeat(s[otheraxis])\n ret = self._map[mask]\n else:\n raise ValueError('The size of array (%d) does not '\n 'match that of either the full (%d) '\n 'or trimed (%d).'\n %(s[0], l_full, l_trim))\n \n return ret\n\n def getKirchhoff(self):\n \"\"\"Builds a Kirchhoff matrix based on the contact map.\"\"\"\n\n if self._map is None:\n return None\n else:\n M = self.map\n \n I = np.eye(M.shape[0], dtype=bool)\n A = M.copy()\n A[I] = 0.\n D = np.diag(np.sum(A, axis=0))\n K = D - A\n return K\n\n def _maskUnmappedRegions(self, diag=False):\n \"\"\"Finds and masks unmapped regions in the contact map.\"\"\"\n\n M = self._map\n if M is None: return\n\n if diag:\n # Obtain the diagonal values, need to make sure d is an array \n # instead of a matrix, otherwise diag() later will not work as \n # intended.\n d = np.array(np.diag(M))\n else:\n d = np.array(M.sum(0))\n\n # mask if a diagonal value is zero\n mask_zero = np.array(d==0)\n # mask if a diagonal value is NAN\n mask_nan = np.isnan(d)\n # combine two masks\n mask = np.logical_or(mask_nan, mask_zero)\n self.mask = ~mask\n\n return self.mask\n \n def calcGNM(self, n_modes=None, **kwargs):\n \"\"\"Calculates GNM on the current Hi-C map. By default, ``n_modes`` is \n set to **None** and ``zeros`` to **True**.\"\"\"\n\n if 'zeros' not in kwargs:\n kwargs['zeros'] = True\n \n if self.masked:\n gnm = MaskedGNM(self._title, self.mask)\n else:\n gnm = GNM(self._title)\n gnm.setKirchhoff(self.getKirchhoff())\n gnm.calcModes(n_modes=n_modes, **kwargs)\n return gnm\n \n def normalize(self, method=VCnorm, **kwargs):\n \"\"\"Applies chosen normalization on the current Hi-C map.\"\"\"\n\n M = self._map\n N = method(M, **kwargs)\n self.map = N\n return N\n \n def setDomains(self, labels, **kwargs):\n \"\"\"Uses spectral clustering to identify structural domains on the chromosome.\n \n :arg labels: domain labels\n :type labels: :class:`~numpy.ndarray`, list\n\n :arg method: Label assignment algorithm used after Laplacian embedding.\n :type method: func\n \"\"\"\n wastrimmed = self.masked\n\n self.masked = True\n if len(labels) == self.numAtoms():\n full_length = self.numAtoms()\n if full_length != len(labels):\n _labels = np.empty(full_length)\n _labels.fill(np.nan)\n _labels[self.mask] = labels\n\n currlbl = labels[0]\n\n for i in range(len(_labels)):\n l = _labels[i]\n if np.isnan(l):\n _labels[i] = currlbl\n elif currlbl != l:\n currlbl = l\n labels = _labels\n else:\n self.masked = False\n if len(labels) != self.numAtoms():\n raise ValueError('The length of the labels should match either the length '\n 'of masked or complete Hi-C map. 
Turn off \"masked\" if '\n 'you intended to set the labels to the full map.')\n \n self.masked = wastrimmed\n self._labels = labels\n return self.getDomains()\n \n def getDomains(self):\n \"\"\"Returns an 1D :class:`numpy.ndarray` whose length is the number of loci. Each \n element is an index denotes to which domain the locus belongs.\"\"\"\n\n lbl = self._labels\n mask = self.mask\n if self.masked:\n lbl = lbl[mask]\n return lbl\n\n def getDomainList(self):\n \"\"\"Returns a list of domain separations. The list has two columns: the first is for \n the domain starts and the second is for the domain ends.\"\"\"\n\n indicators = np.diff(self.getDomains())\n indicators = np.append(1., indicators)\n indicators[-1] = 1\n sites = np.where(indicators != 0)[0]\n starts = sites[:-1]\n ends = sites[1:]\n domains = np.array([starts, ends]).T\n\n return domains\n\n def view(self, spec='p', **kwargs):\n \"\"\"Visualization of the Hi-C map and domains (if present). The function makes use \n of :func:`.showMatrix`.\n \n :arg spec: a string specifies how to preprocess the matrix. Blank for no preprocessing,\n 'p' for showing only data from *p*-th to *100-p*-th percentile. '_' is to suppress \n creating a new figure and paint to the current one instead. The letter specifications \n can be applied sequentially, e.g. 'p_'.\n :type spec: str\n\n :arg p: specifies the percentile threshold.\n :type p: double\n \"\"\"\n\n dm_kwargs = {}\n keys = list(kwargs.keys())\n for k in keys:\n if k.startswith('dm_'):\n dm_kwargs[k[3:]] = kwargs.pop(k)\n elif k.startswith('domain_'):\n dm_kwargs[k[7:]] = kwargs.pop(k)\n\n M = self.map\n if 'p' in spec:\n p = kwargs.pop('p', 5)\n lp = kwargs.pop('lp', p)\n hp = kwargs.pop('hp', 100-p)\n vmin = np.percentile(M, lp)\n vmax = np.percentile(M, hp)\n else:\n vmin = vmax = None\n\n if not 'vmin' in kwargs:\n kwargs['vmin'] = vmin\n if not 'vmax' in kwargs:\n kwargs['vmax'] = vmax\n\n im = showMatrix(M, **kwargs)\n\n domains = self.getDomainList()\n if len(domains) > 1:\n showDomains(domains, **dm_kwargs)\n\n return im\n\n def copy(self):\n new = type(self)()\n new.__dict__.update(self.__dict__)\n return new\n \n __copy__ = copy\n\n\ndef parseHiC(filename, **kwargs):\n \"\"\"Returns an :class:`.HiC` from a Hi-C data file.\n\n This function extends :func:`.parseHiCStream`.\n\n :arg filename: the filename to the Hi-C data file.\n :type filename: str\n \"\"\"\n\n import os, struct\n title = kwargs.get('title')\n if title is None:\n title = os.path.basename(filename)\n else:\n title = kwargs.pop('title')\n\n if isURL(filename):\n M, res = parseHiCBinary(filename, title=title, **kwargs)\n else:\n with open(filename,'rb') as req:\n magic_number = struct.unpack('<3s',req.read(3))[0]\n if magic_number == b\"HIC\":\n M, res = parseHiCBinary(filename, title=title, **kwargs)\n else:\n with open(filename, 'r') as filestream:\n M, res = parseHiCStream(filestream, title=title, **kwargs)\n \n hic = HiC(title=title, map=M, bin=res)\n\n return hic\n\ndef _sparse2dense(I, J, values, bin=None):\n I = np.asarray(I, dtype=int)\n J = np.asarray(J, dtype=int)\n values = np.asarray(values, dtype=float)\n # determine the bin size by the most frequent interval\n if bin is None:\n loci = np.unique(np.sort(I))\n bins = np.diff(loci)\n bin = mode(bins)[0][0]\n # convert coordinate from basepair to locus index\n bin = int(bin)\n I = I // bin\n J = J // bin\n # make sure that the matrix is square\n # if np.max(I) != np.max(J):\n # b = np.max(np.append(I, J))\n # I = np.append(I, b)\n # J = np.append(J, b)\n # 
values = np.append(values, 0.)\n # Convert to sparse matrix format, then full matrix format\n # and finally array type. Matrix format is avoided because\n # diag() won't work as intended for Matrix instances.\n M = np.array(coo_matrix((values, (I, J))).todense())\n return M, bin\n\ndef parseHiCStream(stream, **kwargs):\n \"\"\"Returns an :class:`.HiC` from a stream of Hi-C data lines.\n\n :arg stream: Anything that implements the method ``read``, ``seek``\n (e.g. :class:`file`, buffer, stdin)\n \"\"\"\n\n issparse = kwargs.get('sparse', None)\n\n import csv\n dialect = csv.Sniffer().sniff(stream.read(1024))\n stream.seek(0)\n reader = csv.reader(stream, dialect)\n D = list()\n for row in reader:\n d = list()\n for element in row:\n d.append(np.double(element))\n D.append(d)\n D = np.array(D)\n\n res = kwargs.get('bin', None)\n if res is not None:\n res = int(res)\n size = D.shape\n if len(D.shape) <= 1:\n raise ValueError(\"cannot parse the file: input file only contains one column.\")\n \n if issparse is None:\n issparse = size[1] == 3\n\n if not issparse:\n M = D\n else:\n try:\n I, J, values = D.T[:3]\n except ValueError:\n raise ValueError('the sparse matrix format should have three columns')\n \n M, res = _sparse2dense(I, J, values, bin=res)\n return M, res\n\ndef parseHiCBinary(filename, **kwargs):\n\n chrloc = kwargs.get('chrom', None)\n if chrloc is None:\n raise ValueError('chrom needs to be specified when parsing .hic format')\n chrloc1 = kwargs.get('chrom1', chrloc)\n chrloc2 = kwargs.get('chrom2', chrloc)\n norm = kwargs.get('norm', 'NONE')\n unit = kwargs.get('unit', 'BP')\n res = kwargs.get('binsize', None)\n res = kwargs.get('bin', res)\n if res is None:\n raise ValueError('bin needs to be specified when parsing .hic format')\n res = int(res)\n\n from .straw import straw\n result = straw(norm, filename, chrloc1, chrloc2, unit, res)\n\n M, res = _sparse2dense(*result, bin=res)\n return M, res\n\ndef writeMap(filename, map, bin=None, format='%f'):\n \"\"\"Writes *map* to the file designated by *filename*.\n\n :arg filename: the file to be written.\n :type filename: str\n\n :arg map: a Hi-C contact map.\n :type map: :class:`numpy.ndarray`\n\n :arg bin: bin size of the *map*. If bin is `None`, *map* will be \n written in full matrix format.\n :type bin: int\n\n :arg format: output format for map elements.\n :type format: str\n \"\"\"\n\n assert isinstance(map, np.ndarray), 'map must be a numpy.ndarray.'\n\n if bin is None:\n return writeArray(filename, map, format=format)\n else:\n L = int(map.size - np.diag(map).size)//2 + np.diag(map).size\n spmat = np.zeros((L, 3))\n m,n = map.shape\n l = 0\n for i in range(m):\n for j in range(i,n):\n spmat[l, 0] = i * bin\n spmat[l, 1] = j * bin\n spmat[l, 2] = map[i, j]\n l += 1\n fmt = ['%d', '%d', format]\n return writeArray(filename, spmat, format=fmt)\n\ndef saveHiC(hic, filename=None, map=True, **kwargs):\n \"\"\"Saves *HiC* model data as :file:`filename.hic.npz`. If *map* is **True**, \n Hi-C contact map will not be saved and it can be loaded from raw data file \n later. If *filename* is **None**, name of the Hi-C instance will be used as \n the filename, after ``\" \"`` (white spaces) in the name are replaced with \n ``\"_\"`` (underscores). Upon successful completion of saving, filename is \n returned. 
This function makes use of :func:`numpy.savez` function.\"\"\"\n\n assert isinstance(hic, HiC), 'hic must be a HiC instance.'\n \n if filename is None:\n filename = hic.getTitle().replace(' ', '_')\n \n if filename.endswith('.hic'):\n filename += '.npz'\n elif not filename.endswith('.hic.npz'):\n filename += '.hic.npz'\n\n attr_dict = hic.__dict__.copy()\n if not map:\n attr_dict.pop('_map')\n\n ostream = openFile(filename, 'wb', **kwargs)\n np.savez_compressed(ostream, **attr_dict)\n ostream.close()\n\n return filename\n\ndef loadHiC(filename):\n \"\"\"Returns HiC instance after loading it from file (*filename*).\n This function makes use of :func:`numpy.load` function. See also \n :func:`saveHiC`.\"\"\"\n \n attr_dict = np.load(filename)\n hic = HiC()\n\n keys = attr_dict.keys()\n\n for k in keys:\n val = attr_dict[k]\n if len(val.shape) == 0:\n val = np.asscalar(val)\n setattr(hic, k, val)\n return hic\n\ndef saveHiC_h5(hic, filename=None, **kwargs):\n \"\"\"Saves *HiC* model data as :file:`filename.hic.npz`. If *filename* is \n **None**, name of the Hi-C instance will be used as \n the filename, after ``\" \"`` (white spaces) in the name are replaced with \n ``\"_\"`` (underscores). Upon successful completion of saving, filename is \n returned. This function makes use of :func:`numpy.savez` function.\"\"\"\n\n try:\n import h5py\n except:\n raise ImportError('h5py needs to be installed for using this function')\n\n assert isinstance(hic, HiC), 'hic must be a HiC instance.'\n \n if filename is None:\n filename = hic.getTitle().replace(' ', '_')\n \n if filename.endswith('.hic'):\n filename += '.hic'\n elif not filename.endswith('.hic.h5'):\n filename += '.hic.h5'\n\n attr_dict = hic.__dict__.copy()\n\n with h5py.File(filename, 'w') as f:\n for key in attr_dict:\n value = attr_dict[key]\n compression = None if np.isscalar(value) else 'gzip'\n f.create_dataset(key, data=value, compression=compression)\n\n return filename\n\ndef loadHiC_h5(filename):\n \"\"\"Returns HiC instance after loading it from file (*filename*).\n This function makes use of :func:`numpy.load` function. See also \n :func:`saveHiC`.\"\"\"\n \n try:\n import h5py\n except:\n raise ImportError('h5py needs to be installed for using this function')\n\n hic = HiC()\n with h5py.File(filename, 'r') as f:\n for key in f.keys():\n try:\n value = f[key][:]\n except:\n value = f[key][()]\n setattr(hic, key, value)\n\n return hic\n" ]
[ [ "numpy.logical_or", "numpy.sum", "scipy.stats.mode", "numpy.diag", "numpy.diff", "numpy.asarray", "numpy.isscalar", "numpy.append", "numpy.asscalar", "numpy.expand_dims", "numpy.isnan", "numpy.where", "numpy.load", "numpy.eye", "numpy.double", "numpy.zeros", "numpy.count_nonzero", "scipy.sparse.coo_matrix", "numpy.sort", "numpy.percentile", "numpy.ma.compress_rowcols", "numpy.savez_compressed", "numpy.empty", "numpy.ma.array", "numpy.array", "numpy.outer" ] ]
minhhoang1023/GamestonkTerminal
[ "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704", "195dc19b491052df080178c0cc6a9d535a91a704" ]
[ "gamestonk_terminal/common/quantitative_analysis/rolling_model.py", "gamestonk_terminal/portfolio/portfolio_view.py", "gamestonk_terminal/cryptocurrency/nft/opensea_model.py" ]
[ "\"\"\"Rolling Statistics\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nfrom typing import Tuple\n\nimport pandas as pd\nimport pandas_ta as ta\n\nfrom gamestonk_terminal.decorators import log_start_end\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef get_rolling_avg(df: pd.DataFrame, length: int) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Return rolling mean and standard deviation\n\n Parameters\n ----------\n df_stock : pd.DataFrame\n Dataframe of target data\n length : int\n Length of rolling window\n\n Returns\n -------\n pd.DataFrame :\n Dataframe of rolling mean\n pd.DataFrame :\n Dataframe of rolling standard deviation\n \"\"\"\n rolling_mean = df.rolling(length, center=True, min_periods=1).mean()\n rolling_std = df.rolling(length, center=True, min_periods=1).std()\n\n return pd.DataFrame(rolling_mean), pd.DataFrame(rolling_std)\n\n\n@log_start_end(log=logger)\ndef get_spread(df: pd.DataFrame, length: int) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Standard Deviation and Variance\n\n Parameters\n ----------\n df_stock : pd.DataFrame\n DataFrame of targeted data\n\n Returns\n -------\n df_sd : pd.DataFrame\n Dataframe of rolling standard deviation\n df_var : pd.DataFrame\n Dataframe of rolling standard deviation\n \"\"\"\n df_sd = ta.stdev(\n close=df,\n length=length,\n ).dropna()\n df_var = ta.variance(\n close=df,\n length=length,\n ).dropna()\n\n return pd.DataFrame(df_sd), pd.DataFrame(df_var)\n\n\n@log_start_end(log=logger)\ndef get_quantile(\n df: pd.DataFrame, length: int, quantile_pct: float\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Overlay Median & Quantile\n\n Parameters\n ----------\n df : pd.DataFrame\n Dataframe of targeted data\n length : int\n Length of window\n quantile : float\n Quantile to display\n\n Returns\n -------\n df_med : pd.DataFrame\n Dataframe of median prices over window\n df_quantile : pd.DataFrame\n Dataframe of gievn quantile prices over window\n \"\"\"\n df_med = ta.median(close=df, length=length).dropna()\n df_quantile = ta.quantile(\n df,\n length=length,\n q=quantile_pct,\n ).dropna()\n\n return pd.DataFrame(df_med), pd.DataFrame(df_quantile)\n\n\n@log_start_end(log=logger)\ndef get_skew(df: pd.DataFrame, length: int) -> pd.DataFrame:\n \"\"\"Skewness Indicator\n\n Parameters\n ----------\n df_stock : pd.DataFrame\n Dataframe of targeted data\n length : int\n Length of window\n\n Returns\n -------\n df_skew : pd.DataFrame\n Dataframe of rolling skew\n \"\"\"\n df_skew = ta.skew(close=df, length=length).dropna()\n return df_skew\n\n\n@log_start_end(log=logger)\ndef get_kurtosis(df: pd.DataFrame, length: int) -> pd.DataFrame:\n \"\"\"Kurtosis Indicator\n\n Parameters\n ----------\n df_stock : pd.DataFrame\n Dataframe of targeted data\n length : int\n Length of window\n\n Returns\n -------\n df_kurt : pd.DataFrame\n Dataframe of rolling kurtosis\n \"\"\"\n df_kurt = ta.kurtosis(close=df, length=length).dropna()\n return df_kurt\n", "\"\"\"Portfolio View\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nfrom typing import List, Optional\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom gamestonk_terminal.config_terminal import theme\nfrom gamestonk_terminal.config_plot import PLOT_DPI\nfrom gamestonk_terminal.portfolio import (\n portfolio_model,\n)\n\nfrom gamestonk_terminal.helper_funcs import (\n plot_autoscale,\n export_data,\n)\nfrom gamestonk_terminal.decorators import log_start_end\nfrom gamestonk_terminal.rich_config import console\n\n# from 
reportlab.lib.pagesizes import letter\n# from reportlab.pdfgen import canvas\n# from reportlab.lib.utils import ImageReader\n# from gamestonk_terminal.portfolio import reportlab_helpers\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef load_info():\n \"\"\"Prints instructions to load a CSV\n\n Returns\n ----------\n text : str\n Information on how to load a csv\n \"\"\"\n text = \"\"\"\nIn order to load a CSV do the following:\n\n1. Add headers to the first row, below is data for each column:\\n\n\\t1. Identifier for the asset (such as a stock ticker)\n\\t2. Type of asset (stock, bond, option, crypto)\n\\t3. The volume of the asset transacted\n\\t4. The buy date in yyyy/mm/dd\n\\t5. The Price paid for the asset\n\\t6. Any fees paid during the transaction\n\\t7. A premium paid or received if this was an option\n\\t8. Whether the asset was bought (covered) or sold (shorted)\\n\n2. Place this file in gamestonk_terminal/portfolio/portfolios\\n\n \"\"\"\n console.print(text)\n\n\n@log_start_end(log=logger)\ndef display_returns_vs_bench(\n portfolio: portfolio_model.Portfolio,\n benchmark: str = \"SPY\",\n external_axes: Optional[plt.Axes] = None,\n):\n \"\"\"Display portfolio returns vs benchmark\n\n Parameters\n ----------\n portfolio: Portfolio\n Custom portfolio object with trade list\n benchmark: str\n Symbol for benchmark. Defaults to SPY\n external_axes: plt.Axes\n Optional axes to display plot on\n \"\"\"\n if external_axes is None:\n _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n else:\n ax = external_axes\n\n portfolio.generate_holdings_from_trades()\n portfolio.add_benchmark(benchmark)\n\n cumulative_returns = (1 + portfolio.returns).cumprod()\n benchmark_c_returns = (1 + portfolio.benchmark_returns).cumprod()\n\n ax.plot(cumulative_returns.index, cumulative_returns, label=\"Portfolio\")\n ax.plot(benchmark_c_returns.index, benchmark_c_returns, label=\"Benchmark\")\n ax.set_ylabel(\"Cumulative Returns\")\n ax.legend(loc=\"upper left\")\n theme.style_primary_axis(ax)\n if not external_axes:\n theme.visualize_output()\n\n\n@log_start_end(log=logger)\ndef display_allocation(\n portfolio: portfolio_model.Portfolio,\n export: str = \"\",\n external_axes: Optional[plt.Axes] = None,\n):\n \"\"\"Display allocation of assets vs time\n\n Parameters\n ----------\n portfolio: Portfolio\n Portfolio object with trades loaded\n export: str\n Format to export plot\n external_axes: plt.Axes\n Optional axes to display plot on\n \"\"\"\n portfolio.generate_holdings_from_trades()\n all_holdings = pd.concat(\n [\n portfolio.portfolio[\"StockHoldings\"],\n portfolio.portfolio[\"ETFHoldings\"],\n portfolio.portfolio[\"CryptoHoldings\"],\n ],\n axis=1,\n )\n all_holdings = all_holdings.drop(columns=[\"temp\"])\n\n if external_axes is None:\n _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n else:\n ax = external_axes\n all_holdings.plot(ax=ax)\n ax.set_title(\"Individual Asset Holdings\")\n ax.legend(loc=\"upper left\")\n ax.set_ylabel(\"Holdings ($)\")\n theme.style_primary_axis(ax)\n if external_axes is None:\n theme.visualize_output()\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"rolling\",\n )\n\n\n@log_start_end(log=logger)\ndef display_rolling_stats(\n portfolio: portfolio_model.Portfolio,\n length: int = 60,\n benchmark: str = \"SPY\",\n risk_free_rate: float = 0,\n external_axes: Optional[List[plt.Axes]] = None,\n export: str = \"\",\n):\n \"\"\"Display portfolio returns vs benchmark\n\n Parameters\n ----------\n 
portfolio: Portfolio\n Custom portfolio object with trade list\n length: int\n Length of rolling window\n benchmark: str\n Symbol for benchmark. Defaults to SPY\n risk_free_rate: float\n Value to use for risk free rate in sharpe/other calculations\n external_axes: Optional[List[plt.Axes]]\n Optional axes to display plot on\n export: str\n Export to file\n \"\"\"\n portfolio.generate_holdings_from_trades()\n portfolio.add_benchmark(benchmark)\n portfolio.add_rf(risk_free_rate)\n if external_axes is None:\n _, ax = plt.subplots(4, 1, figsize=(8, 8), dpi=PLOT_DPI, sharex=True)\n else:\n if len(external_axes) != 4:\n console.print(\"[red]4 axes expected./n[/red]\")\n return\n ax = external_axes\n rolling_volatility = portfolio.returns.rolling(length).std()\n rolling_volatility_bench = portfolio.benchmark_returns.rolling(length).std()\n\n rolling_sharpe = portfolio.returns.rolling(length).apply(\n lambda x: (x.mean() - risk_free_rate) / x.std()\n )\n rolling_sharpe_bench = portfolio.benchmark_returns.rolling(length).apply(\n lambda x: (x.mean() - risk_free_rate) / x.std()\n )\n\n rolling_volatility.plot(ax=ax[1])\n rolling_volatility_bench.plot(ax=ax[1])\n ax[1].set_title(\"Rolling Volatility\")\n\n rolling_sharpe.plot(ax=ax[2])\n rolling_sharpe_bench.plot(ax=ax[2])\n ax[2].set_title(\"Rolling Sharpe Ratio\")\n\n # Rolling beta is defined as Cov(Port,Bench)/var(Bench)\n covs = (\n pd.DataFrame(\n {\"Portfolio\": portfolio.returns, \"Benchmark\": portfolio.benchmark_returns}\n )\n .dropna(axis=0)\n .rolling(length)\n .cov()\n .unstack()\n .dropna()\n )\n rolling_beta = covs[\"Portfolio\"][\"Benchmark\"] / covs[\"Benchmark\"][\"Benchmark\"]\n rolling_beta.plot(ax=ax[3])\n ax[3].set_title(\"Rolling Beta to Benchmark\")\n\n c_returns = (1 + portfolio.returns).cumprod()\n bench_c_rets = (1 + portfolio.benchmark_returns).cumprod()\n\n ax[0].plot(c_returns.index, c_returns)\n ax[0].plot(bench_c_rets.index, bench_c_rets)\n ax[0].set_title(\"Cumulative Returns\")\n\n if external_axes is None:\n\n for a in ax[0], ax[1], ax[2]:\n a.legend([\"Portfolio\", \"Benchmark\"], loc=\"upper left\")\n for a in ax[0], ax[1], ax[2], ax[3]:\n a.set_xlim(portfolio.returns.index[0], portfolio.returns.index[-1])\n a.set_xlabel([])\n a.grid(\"on\")\n theme.style_primary_axis(a)\n\n ax[3].set_xlabel(\"Date\")\n\n theme.visualize_output()\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"rolling\",\n )\n\n\n@log_start_end(log=logger)\ndef display_drawdown(\n holdings: pd.DataFrame,\n export: str = \"\",\n external_axes: Optional[List[plt.Axes]] = None,\n):\n \"\"\"Display drawdown curve\n\n Parameters\n ----------\n holdings: pd.DataFrame\n Dataframe of holdings vs time\n export: str\n Format to export data\n external_axes: plt.Axes\n Optional axes to display plot on\n \"\"\"\n drawdown = portfolio_model.calculate_drawdown(holdings)\n if external_axes is None:\n _, ax = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=PLOT_DPI, sharex=True)\n else:\n ax = external_axes\n\n ax[0].plot(holdings.index, holdings)\n ax[0].set_title(\"Holdings\")\n ax[1].plot(holdings.index, drawdown)\n ax[1].fill_between(holdings.index, np.asarray(drawdown), alpha=0.4)\n ax[1].set_title(\"Portfolio Drawdown\")\n\n theme.style_primary_axis(ax[1])\n if external_axes is None:\n theme.visualize_output()\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"dd\",\n )\n\n\n#\n# @log_start_end(log=logger)\n# def plot_overall_return(\n# comb: pd.DataFrame, m_tick: str, plot: bool = False\n# ) -> 
ImageReader:\n# \"\"\"Generates overall return graph\n#\n# Parameters\n# ----------\n# comb : pd.DataFrame\n# Dataframe with returns\n# m_tick : str\n# The ticker for the market asset\n# plot : bool\n# Whether to plot the graph or return it for PDF\n#\n# Returns\n# ----------\n# img : ImageReader\n# Overal return graph\n# \"\"\"\n# fig, ax = plt.subplots(figsize=(10, 5))\n# ax.plot(comb.index, comb[\"return\"], color=\"tab:blue\", label=\"Portfolio\")\n# ax.plot(comb.index, comb[(\"Market\", \"Return\")], color=\"orange\", label=m_tick)\n#\n# ax.set_ylabel(\"\", fontweight=\"bold\", fontsize=12, color=\"black\")\n# ax.set_xlabel(\"\")\n# ax.yaxis.set_label_coords(-0.1, 0.5)\n# ax.grid(True)\n# ax.spines[\"top\"].set_visible(False)\n# ax.spines[\"right\"].set_visible(False)\n# ax.spines[\"bottom\"].set_visible(False)\n# ax.spines[\"left\"].set_visible(False)\n# fig.suptitle(\n# \"Cumulative Performance\", y=0.99, fontweight=\"bold\", fontsize=14, color=\"black\"\n# )\n# ax.axhline(0, ls=\"-\", lw=1, color=\"gray\", zorder=1)\n# ax.axhline(0, ls=\"--\", lw=1, color=\"black\", zorder=2)\n# fig.set_facecolor(\"white\")\n# ax.set_title(\n# f'{comb.index[:1][0].strftime(\"%Y/%m/%d\")} - {comb.index[-1:][0].strftime(\"%Y/%m/%d\")}',\n# fontsize=12,\n# color=\"gray\",\n# )\n# ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))\n# ax.set_facecolor(\"white\")\n# ax.legend()\n# fig.autofmt_xdate()\n# if plot:\n# plt.show()\n# console.print(\"\")\n# return None\n# imgdata = BytesIO()\n# fig.savefig(imgdata, format=\"png\")\n# plt.close(\"all\")\n# imgdata.seek(0)\n# return ImageReader(imgdata)\n#\n#\n# @log_start_end(log=logger)\n# def plot_rolling_beta(df: pd.DataFrame) -> ImageReader:\n# \"\"\"Returns a chart with the portfolio's rolling beta\n#\n# Parameters\n# ----------\n# df : pd.DataFrame\n# The dataframe to be analyzed\n#\n# Returns\n# ----------\n# img : ImageReader\n# Rolling beta graph\n# \"\"\"\n#\n# fig, ax = plt.subplots(figsize=(10, 5))\n# ax.plot(\n# df.index,\n# df[\"total\"],\n# color=\"tab:blue\",\n# )\n#\n# ax.set_ylabel(\"\", fontweight=\"bold\", fontsize=12, color=\"black\")\n# ax.set_xlabel(\"\")\n# ax.yaxis.set_label_coords(-0.1, 0.5)\n# ax.grid(True)\n# ax.spines[\"top\"].set_visible(False)\n# ax.spines[\"right\"].set_visible(False)\n# ax.spines[\"bottom\"].set_visible(False)\n# ax.spines[\"left\"].set_visible(False)\n# fig.suptitle(\n# \"Rolling Beta of Stocks\", y=0.99, fontweight=\"bold\", fontsize=14, color=\"black\"\n# )\n# ax.axhline(0, ls=\"-\", lw=1, color=\"gray\", zorder=1)\n# ax.axhline(0, ls=\"--\", lw=1, color=\"black\", zorder=2)\n# fig.set_facecolor(\"white\")\n# ax.set_title(\n# f'{df.index[:1][0].strftime(\"%Y-%m-%d\")} - {df.index[-1:][0].strftime(\"%Y-%m-%d\")}',\n# color=\"gray\",\n# )\n# ax.set_facecolor(\"white\")\n# fig.autofmt_xdate()\n# imgdata = BytesIO()\n# fig.savefig(imgdata, format=\"png\")\n# plt.close(\"all\")\n# imgdata.seek(0)\n# return ImageReader(imgdata)\n#\n#\n# @log_start_end(log=logger)\n# def plot_ef(\n# stocks: List[str],\n# variance: float,\n# per_ret: float,\n# rf_rate: float,\n# period: str = \"3mo\",\n# n_portfolios: int = 300,\n# risk_free: bool = False,\n# ):\n# \"\"\"Display efficient frontier\n#\n# Parameters\n# ----------\n# stocks : List[str]\n# List of the stocks to be included in the weights\n# variance : float\n# The variance for the portfolio\n# per_ret : float\n# The portfolio's return for the portfolio\n# rf_rate : float\n# The risk free rate\n# period : str\n# The period to track\n# n_portfolios : int\n# The 
number of portfolios to generate\n# risk_free : bool\n# Include the risk-free asset\n# \"\"\"\n# fig, ax = plt.subplots(figsize=(10, 5), dpi=PLOT_DPI)\n# ef, rets, stds = optimizer_model.generate_random_portfolios(\n# [x.upper() for x in stocks], period, n_portfolios\n# )\n# sharpes = rets / stds\n# ax.scatter(stds, rets, marker=\".\", c=sharpes, cmap=\"viridis_r\")\n# plotting.plot_efficient_frontier(ef, ax=ax, show_assets=True)\n# # Find the tangency portfolio\n# ret_sharpe, std_sharpe, _ = ef.portfolio_performance(risk_free_rate=rf_rate)\n# ax.scatter(std_sharpe, ret_sharpe, marker=\"*\", s=100, c=\"r\", label=\"Max Sharpe\")\n# plt.plot(variance, per_ret, \"ro\", label=\"Portfolio\")\n# # Add risk free line\n# if risk_free:\n# y = ret_sharpe * 1.2\n# m = (ret_sharpe - rf_rate) / std_sharpe\n# x2 = (y - rf_rate) / m\n# x = [0, x2]\n# y = [rf_rate, y]\n# line = Line2D(x, y, color=\"#FF0000\", label=\"Capital Allocation Line\")\n# ax.set_xlim(xmin=min(stds) * 0.8)\n# ax.add_line(line)\n# ax.set_title(f\"Efficient Frontier simulating {n_portfolios} portfolios\")\n# ax.legend()\n# fig.tight_layout()\n# ax.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n#\n# if gtff.USE_ION:\n# plt.ion()\n#\n# imgdata = BytesIO()\n# fig.savefig(imgdata, format=\"png\")\n# plt.close(\"all\")\n# imgdata.seek(0)\n# return ImageReader(imgdata)\n\n\n# @log_start_end(log=logger)\n# def display_allocation2(data: pd.DataFrame, graph: bool):\n# \"\"\"Displays allocation\n# Parameters\n# ----------\n# data: pd.DataFrame\n# The portfolio allocation dataframe\n# graph: bool\n# If pie chart shall be displayed with table\"\"\"\n#\n# print_rich_table(data, headers=list(data.columns), title=\"Allocation\")\n# console.print(\"\")\n#\n# if graph:\n# graph_data = data[data[\"pct_allocation\"] >= 5].copy()\n# if not graph_data.empty:\n# graph_data.loc[\"Other\"] = [\n# \"NA\",\n# data[\"value\"].sum() - graph_data[\"value\"].sum(),\n# 100 - graph_data[\"value\"].sum(),\n# ]\n# labels = graph_data.index.values\n# sizes = graph_data[\"value\"].to_list()\n# else:\n# labels = data.index.values\n# sizes = data[\"value\"].to_list()\n# fig, ax = plt.subplots()\n# ax.pie(sizes, labels=labels, autopct=\"%1.1f%%\", startangle=90)\n# ax.axis(\"equal\")\n# ax.set_title(\"Portfolio Allocation\")\n# fig.set_tight_layout(True)\n#\n# plt.show()\n\n#\n# class Report:\n# @log_start_end(log=logger)\n# def __init__(self, df: pd.DataFrame, hist: pd.DataFrame, m_tick: str):\n# \"\"\"Generate financial reports.\n# Financial reports allow users to show the how they have been performing in\n# trades. 
This allows for a simple way to show progress and analyze metrics\n# that track portfolio performance\n#\n# Parameters\n# ----------\n# df : pd.DataFrame\n# The dataframe with previous holdings information\n# hist : pd.DataFrame\n# The dataframe with previous prices for stocks in the portfolio\n# df_m : pd.DataFrame\n# Dataframe of benchmark\n# n : int\n# The number of days to analyze\n#\n# Attributes\n# ----------\n# generate_report : None\n# Generates a report with the given parameters\n# generate_pg1 : None\n# Creates the first page of the PDF report\n# generate_pg2 : None\n# Creates the second page of the PDF report\n#\n# \"\"\"\n# self.df = df\n# self.hist = hist\n# self.m_tick = m_tick\n# self.df_m = yfinance_model.get_market(self.df.index[0], self.m_tick)\n# # self.returns, self.variance = portfolio_model.get_return(df, self.df_m, n)\n# self.returns = pd.DataFrame()\n# self.rf = get_rf()\n# self.betas = portfolio_model.get_rolling_beta(\n# self.df, self.hist, self.df_m, 365\n# )\n#\n# @log_start_end(log=logger)\n# def generate_report(self) -> None:\n# d = path.dirname(path.abspath(__file__)).replace(\n# \"gamestonk_terminal\", \"exports\"\n# )\n# loc = path.abspath(\n# path.join(\n# d,\n# f\"ar_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf\",\n# )\n# )\n# report = canvas.Canvas(loc, pagesize=letter)\n# reportlab_helpers.base_format(report, \"Overview\")\n# self.generate_pg1(report)\n# self.generate_pg2(report)\n# report.save()\n# console.print(\"File save in:\\n\", loc, \"\\n\")\n#\n# @log_start_end(log=logger)\n# def generate_pg1(self, report: canvas.Canvas) -> None:\n# report.drawImage(\n# plot_overall_return(self.returns, self.m_tick, False), 15, 400, 600, 300\n# )\n# main_text = portfolio_model.get_main_text(self.returns)\n# reportlab_helpers.draw_paragraph(report, main_text, 30, 410, 550, 200)\n# current_return = self.returns[\"return\"][-1]\n# beta = self.betas[\"total\"][-1]\n# market_return = self.returns[(\"Market\", \"Return\")][-1]\n# sharpe = f\"{(current_return - self.rf)/ np.std(self.returns['return']):.2f}\"\n# treynor = f\"{(current_return - self.rf)/ beta:.2f}\" if beta > 0 else \"N/A\"\n# alpha = f\"{current_return - (self.rf + beta * (market_return - self.rf)):.2f}\"\n# information = (\n# f\"{float(alpha)/ (np.std(self.returns['return'] - market_return)):.2f}\"\n# )\n# perf = [\n# [\"Sharpe\", sharpe],\n# [\"Treynor\", treynor],\n# [\"Alpha\", alpha],\n# [\"Information\", information],\n# ]\n# reportlab_helpers.draw_table(report, \"Performance\", 540, 300, 30, perf)\n# reportlab_helpers.draw_paragraph(\n# report, portfolio_model.performance_text, 140, 290, 460, 200\n# )\n# report.showPage()\n#\n# @log_start_end(log=logger)\n# def generate_pg2(self, report: canvas.Canvas) -> None:\n# reportlab_helpers.base_format(report, \"Portfolio Analysis\")\n# if \"Holding\" in self.df.columns:\n# report.drawImage(plot_rolling_beta(self.betas), 15, 400, 600, 300)\n# main_t = portfolio_model.get_beta_text(self.betas)\n# reportlab_helpers.draw_paragraph(report, main_t, 30, 410, 550, 200)\n# # report.drawImage(plot_ef(uniques, self.variance, self.returns[\"return\"][-1], self.rf), 15, 65, 600, 300)\n", "\"\"\" opensea.io Model \"\"\"\n\nimport logging\nfrom datetime import datetime\n\nimport pandas as pd\nimport requests\n\nfrom gamestonk_terminal.decorators import log_start_end\n\nlogger = logging.getLogger(__name__)\n\nAPI_URL = \"https://api.opensea.io/api/v1\"\n\n\n@log_start_end(log=logger)\ndef get_collection_stats(slug: str) -> pd.DataFrame:\n \"\"\"Get stats of a nft 
collection [Source: opensea.io]\n\n Parameters\n -------\n slug : str\n Opensea collection slug. If the name of the collection is Mutant Ape Yacht Club the slug is mutant-ape-yacht-club\n\n Returns\n -------\n pd.DataFrame\n collection stats\n \"\"\"\n res = requests.get(f\"{API_URL}/collection/{slug}\")\n if res.status_code == 200:\n data = res.json()\n collection = data[\"collection\"]\n stats = collection[\"stats\"]\n metrics = [\n \"Name\",\n \"Floor Price (ETH)\",\n \"Number of Owners\",\n \"Market Cap (ETH)\",\n \"Average Price ETH\",\n \"One day volume (ETH)\",\n \"One day change (%)\",\n \"One day sales (ETH)\",\n \"One day average price (ETH)\",\n \"Thirty day volume (ETH)\",\n \"Thirty day change (%)\",\n \"Thirty day sales (ETH)\",\n \"Thirty day average price (ETH)\",\n \"Total Supply (ETH)\",\n \"Total Sales (ETH)\",\n \"Total Volume (ETH)\",\n \"Creation Date\",\n \"URL\",\n ]\n values = [\n collection[\"name\"],\n \"-\" if not stats[\"floor_price\"] else float(stats[\"floor_price\"]),\n round(float(stats[\"num_owners\"]), 2),\n round(float(stats[\"market_cap\"]), 2),\n round(float(stats[\"average_price\"]), 2),\n round(float(stats[\"one_day_volume\"]), 2),\n round(float(stats[\"one_day_change\"]) * 100, 2),\n round(float(stats[\"one_day_sales\"]), 2),\n round(float(stats[\"one_day_average_price\"]), 2),\n round(float(stats[\"thirty_day_volume\"]), 2),\n round(float(stats[\"thirty_day_change\"]) * 100, 2),\n round(float(stats[\"thirty_day_sales\"]), 2),\n round(float(stats[\"thirty_day_average_price\"]), 2),\n round(float(stats[\"total_supply\"]), 2),\n round(float(stats[\"total_sales\"]), 2),\n round(float(stats[\"total_volume\"]), 2),\n datetime.strptime(\n collection[\"created_date\"], \"%Y-%m-%dT%H:%M:%S.%f\"\n ).strftime(\"%b %d, %Y\"),\n \"-\" if not collection[\"external_url\"] else collection[\"external_url\"],\n ]\n df = pd.DataFrame({\"Metric\": metrics, \"Value\": values})\n return df\n return pd.DataFrame()\n" ]
[ [ "pandas.DataFrame" ], [ "numpy.asarray", "pandas.DataFrame", "pandas.concat", "matplotlib.pyplot.subplots" ], [ "pandas.DataFrame" ] ]
matthieucoquet/probability
[ "2426f4fc4743ceedc1a638a03d19ce6654ebff76", "2426f4fc4743ceedc1a638a03d19ce6654ebff76", "2426f4fc4743ceedc1a638a03d19ce6654ebff76" ]
[ "tensorflow_probability/python/experimental/auto_batching/numpy_backend_test.py", "tensorflow_probability/python/bijectors/sigmoid_test.py", "tensorflow_probability/python/internal/backend/numpy/_utils.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for implementations of batched variables.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport hypothesis as hp\nfrom hypothesis import strategies as hps\nfrom hypothesis.extra import numpy as hpnp\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.experimental.auto_batching import backend_test_lib as backend_test\nfrom tensorflow_probability.python.experimental.auto_batching import instructions as inst\nfrom tensorflow_probability.python.experimental.auto_batching import numpy_backend\n\nNP_BACKEND = numpy_backend.NumpyBackend()\n\n\ndef var_init(max_stack_depth, initial_value):\n type_ = inst.TensorType(initial_value.dtype, initial_value.shape[1:])\n var = NP_BACKEND.create_variable(\n None, inst.VariableAllocation.FULL, type_,\n max_stack_depth, batch_size=initial_value.shape[0])\n return var.update(\n initial_value, NP_BACKEND.full_mask(initial_value.shape[0]))\n\n\n# A TF test case for self.assertAllEqual, but doesn't use TF so doesn't care\n# about Eager vs Graph mode.\nclass NumpyVariableTest(tf.test.TestCase, backend_test.VariableTestCase):\n\n def testNumpySmoke(self):\n \"\"\"Test the property on specific example, without relying on Hypothesis.\"\"\"\n init = (12, np.random.randn(3, 2, 2).astype(np.float32))\n ops = [('pop', [False, False, True]),\n ('push', [True, False, True]),\n ('update', np.ones((3, 2, 2), dtype=np.float32),\n [True, True, False]),\n ('pop', [True, False, True])]\n self.check_same_results(init, ops, var_init)\n\n @hp.given(hps.data())\n @hp.settings(\n deadline=None,\n max_examples=100)\n def testNumpyVariableRandomOps(self, data):\n # Hypothesis strategy:\n # Generate a random max stack depth and value shape\n # Deduce the batch size from the value shape\n # Make a random dtype\n # Generate a random initial value of that dtype and shape\n # Generate ops, some of which write random values of that dtype and shape\n max_stack_depth = data.draw(hps.integers(min_value=1, max_value=1000))\n value_shape = data.draw(hpnp.array_shapes(min_dims=1))\n batch_size = value_shape[0]\n dtype = data.draw(hpnp.scalar_dtypes())\n masks = hpnp.arrays(dtype=np.bool, shape=[batch_size])\n values = hpnp.arrays(dtype, value_shape)\n init_val = data.draw(values)\n ops = data.draw(\n hps.lists(\n hps.one_of(\n hps.tuples(hps.just('update'), values, masks),\n hps.tuples(hps.just('push'), masks),\n hps.tuples(hps.just('pop'), masks), # preserve line break\n hps.tuples(hps.just('read')))))\n self.check_same_results((max_stack_depth, init_val), ops, var_init)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Sigmoid Tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nfrom scipy import special\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import bijectors as tfb\n\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SigmoidBijectorTest(tf.test.TestCase):\n \"\"\"Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation.\"\"\"\n\n def testBijector(self):\n self.assertStartsWith(tfb.Sigmoid().name, \"sigmoid\")\n x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)\n y = special.expit(x)\n ildj = -np.log(y) - np.log1p(-y)\n bijector = tfb.Sigmoid()\n self.assertAllClose(\n y, self.evaluate(bijector.forward(x)), atol=0., rtol=1e-2)\n self.assertAllClose(\n x, self.evaluate(bijector.inverse(y)), atol=0., rtol=1e-4)\n self.assertAllClose(\n ildj,\n self.evaluate(bijector.inverse_log_det_jacobian(\n y, event_ndims=0)), atol=0., rtol=1e-6)\n self.assertAllClose(\n -ildj,\n self.evaluate(bijector.forward_log_det_jacobian(\n x, event_ndims=0)), atol=0., rtol=1e-4)\n\n def testScalarCongruency(self):\n bijector_test_util.assert_scalar_congruency(\n tfb.Sigmoid(), lower_x=-7., upper_x=7., eval_func=self.evaluate,\n rtol=.1)\n\n def testBijectiveAndFinite(self):\n x = np.linspace(-100., 100., 100).astype(np.float32)\n eps = 1e-3\n y = np.linspace(eps, 1. - eps, 100).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n tfb.Sigmoid(), x, y, eval_func=self.evaluate, event_ndims=0, atol=0.,\n rtol=1e-4)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Helper functions for numpy backend.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\nimport types\n\nimport numpy as np\nimport tensorflow as tf\n\n\n__all__ = [\n 'common_dtype',\n 'copy_docstring',\n 'numpy_dtype',\n 'try_import',\n]\n\n\n# TODO(jvdillon): Get decoration working. 
Eg,\n# # Dependency imports\n# import decorator\n\n\ndef copy_docstring(original_fn, new_fn): # pylint: disable=unused-argument\n return new_fn\n # TODO(jvdillon): Get decoration working. Eg,\n # @decorator.decorator\n # def wrap(wrapped_fn, *args, **kwargs):\n # del wrapped_fn\n # return new_fn(*args, **kwargs)\n # return wrap(original_fn)\n\n\ndef numpy_dtype(dtype):\n if dtype is None:\n return None\n if hasattr(dtype, 'as_numpy_dtype'):\n return dtype.as_numpy_dtype\n return dtype\n\n\ndef common_dtype(args_list, dtype_hint=None):\n \"\"\"Returns explict dtype from `args_list` if exists, else dtype_hint.\"\"\"\n dtype = None\n dtype_hint = None if dtype_hint is None else tf.as_dtype(dtype_hint)\n for a in tf.nest.flatten(args_list):\n if hasattr(a, 'dtype'):\n dt = tf.as_dtype(a.dtype)\n else:\n continue\n if dtype is None:\n dtype = dt\n elif dtype != dt:\n raise TypeError('Found incompatible dtypes, {} and {}.'.format(dtype, dt))\n if dtype is None and dtype_hint is None:\n return None\n return (dtype_hint if dtype is None else dtype).as_numpy_dtype\n\n\ndef is_complex(dtype):\n \"\"\"Returns whether this is a complex floating point type.\"\"\"\n return np.issubdtype(np.dtype(dtype), np.complexfloating)\n\n\nclass _FakeModule(types.ModuleType):\n \"\"\"Dummy module which raises `NotImplementedError` on `getattr` access.\"\"\"\n\n def __init__(self, name, doc):\n self._name = name\n self._doc = doc\n types.ModuleType.__init__(self, name, doc) # pylint: disable=non-parent-init-called\n\n def __dir__(self):\n return []\n\n def __getattr__(self, attr):\n raise NotImplementedError(self._doc)\n\n\ndef try_import(name): # pylint: disable=invalid-name\n try:\n return importlib.import_module(name)\n except ImportError:\n return _FakeModule(name, 'Error loading module \"{}\".'.format(name))\n" ]
[ [ "numpy.ones", "numpy.random.randn", "tensorflow.test.main" ], [ "numpy.log1p", "tensorflow.compat.v2.test.main", "numpy.log", "scipy.special.expit", "numpy.linspace" ], [ "numpy.dtype", "tensorflow.nest.flatten", "tensorflow.as_dtype" ] ]
mohakbhardwaj/mjmpc
[ "097e8d9bdaf0b3a15afa39030b2f53b00dfa25de" ]
[ "mjmpc/control/olgaussian_mpc.py" ]
[ "\"\"\"\nMPC with open-loop Gaussian policies\n\"\"\"\nfrom .controller import Controller\nfrom mjmpc.utils.control_utils import generate_noise, scale_ctrl\nimport copy\nimport numpy as np\nimport scipy.special\n\nclass OLGaussianMPC(Controller):\n def __init__(self, \n d_state,\n d_obs,\n d_action, \n action_lows,\n action_highs,\n horizon,\n init_cov,\n init_mean,\n base_action,\n num_particles,\n gamma,\n n_iters,\n step_size,\n filter_coeffs,\n set_sim_state_fn=None,\n rollout_fn=None,\n cov_type='diagonal',\n sample_mode='mean',\n batch_size=1,\n seed=0,\n use_zero_control_seq=False):\n \"\"\"\n Parameters\n __________\n base_action : str\n Action to append at the end when shifting solution to next timestep\n 'random' : appends random action\n 'null' : appends zero action\n 'repeat' : repeats second to last action\n num_particles : int\n Number of particles sampled at every iteration\n \"\"\"\n\n super(OLGaussianMPC, self).__init__(d_state,\n d_obs,\n d_action,\n action_lows, \n action_highs,\n horizon,\n gamma, \n n_iters,\n set_sim_state_fn,\n rollout_fn,\n sample_mode,\n batch_size,\n seed)\n self.init_cov = np.array([init_cov] * self.d_action)\n self.init_mean = init_mean.copy()\n self.mean_action = init_mean\n self.base_action = base_action\n self.num_particles = num_particles\n self.cov_type = cov_type\n self.cov_action = np.diag(self.init_cov)\n self.step_size = step_size\n self.filter_coeffs = filter_coeffs\n self.use_zero_control_seq = use_zero_control_seq\n\n def _get_next_action(self, state, mode='mean'):\n if mode == 'mean':\n next_action = self.mean_action[0].copy()\n elif mode == 'sample':\n delta = generate_noise(self.cov_action, self.filter_coeffs,\n shape=(1, 1), base_seed=self.seed_val + 123*self.num_steps)\n next_action = self.mean_action[0].copy() + delta.reshape(self.d_action).copy()\n else:\n raise ValueError('Unidentified sampling mode in get_next_action')\n return next_action\n \n # def sample_actions(self):\n # delta = generate_noise(self.cov_action, self.filter_coeffs,\n # shape=(self.num_particles, self.horizon), \n # base_seed = self.seed_val + self.num_steps) \n # act_seq = self.mean_action[None, :, :] + delta\n # # act_seq = scale_ctrl(act_seq, self.action_lows, self.action_highs)\n # return np.array(act_seq)\n\n def sample_noise(self):\n delta = generate_noise(self.cov_action, self.filter_coeffs,\n shape=(self.num_particles, self.horizon), \n base_seed = self.seed_val + self.num_steps) \n # act_seq = scale_ctrl(act_seq, self.action_lows, self.action_highs)\n return delta\n \n def generate_rollouts(self, state):\n \"\"\"\n Samples a batch of actions, rolls out trajectories for each particle\n and returns the resulting observations, costs, \n actions\n\n Parameters\n ----------\n state : dict or np.ndarray\n Initial state to set the simulation env to\n \"\"\"\n \n self._set_sim_state_fn(copy.deepcopy(state)) #set state of simulation\n # input('....')\n delta = self.sample_noise() #sample noise from covariance of current control distribution\n if self.use_zero_control_seq:\n delta[-1,:] = -1.0 * self.mean_action.copy()\n trajectories = self._rollout_fn(self.num_particles, self.horizon, \n self.mean_action, delta, mode=\"open_loop\") \n return trajectories\n \n def _shift(self):\n \"\"\"\n Predict good parameters for the next time step by\n shifting the mean forward one step\n \"\"\"\n self.mean_action[:-1] = self.mean_action[1:]\n if self.base_action == 'random':\n self.mean_action[-1] = np.random.normal(0, self.init_cov, self.d_action)\n elif 
self.base_action == 'null':\n self.mean_action[-1] = np.zeros((self.d_action, ))\n elif self.base_action == 'repeat':\n self.mean_action[-1] = self.mean_action[-2]\n else:\n raise NotImplementedError(\"invalid option for base action during shift\")\n\n def reset(self):\n self.num_steps = 0\n self.mean_action = np.zeros(shape=(self.horizon, self.d_action))\n self.cov_action = np.diag(self.init_cov)\n self.gamma_seq = np.cumprod([1.0] + [self.gamma] * (self.horizon - 1)).reshape(1, self.horizon)\n\n def _calc_val(self, cost_seq, act_seq):\n raise NotImplementedError(\"_calc_val not implemented\")\n\n" ]
[ [ "numpy.zeros", "numpy.diag", "numpy.random.normal", "numpy.cumprod", "numpy.array" ] ]
kite8/quant_learning
[ "d823974cd2b5a6b8e2a20fe42d7334051fa46ea0" ]
[ "STS_v2/compute_high_low_limit_v3.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 2 15:19:45 2018\n\n@author: kite\n\"\"\"\n\nimport datetime, time\nfrom pymongo import UpdateOne, ASCENDING, UpdateMany\nfrom database import DB_CONN\nfrom stock_util import get_trading_dates, get_all_codes\nimport tushare as ts\nimport numpy as np\nimport pandas as pd\nimport requests\nimport json\nimport datetime\n\n\"\"\"\n计算涨跌停价格\n\n只要获取到前一天的价格\n\n获取name和上市日期\n\n最新ipo规则\n如果是上市当天,则涨停价是上市发行价格的1.44倍\n所以需要获取到发行价格\n要不是\n\"\"\"\n\n# 获取发行价格并保存到数据库中\ndef fill_issueprice_and_timeToMarket():\n \"\"\"\n ipo_info.xlsx 是从东方choice中提取出来;\n columns:\n code -- 股票代码\n name -- 股票当前名字\n issueprice -- 发行价格\n timeToMarket -- 上市时间\n \"\"\"\n df = pd.read_excel('data/ipo_info.xlsx', header=0, dtype={'code':str})\n df = df.set_index('code')\n codes = df.index.tolist()\n \n update_requests = []\n \n for i,code in enumerate(codes):\n try:\n update_requests.append(\n UpdateOne(\n {'code':code},\n {'$set':{'issueprice':df.issueprice[code],\n 'timeToMarket':df.timeToMarket[code]}},\n upsert=True))\n except:\n print('code: %s, has problem' % code)\n \n if len(update_requests)>0:\n update_result = DB_CONN['basic'].bulk_write(update_requests, ordered=False)\n print('填充字段, 字段名: issueprice,数据集:%s,插入:%4d条,更新:%4d条' %\n ('basic', update_result.upserted_count, update_result.modified_count), flush=True)\n\ndef fixing_is_st(start, end):\n # 第一阶段\n df = pd.read_excel('data/stock_basic.xlsx', header=0, dtype={'code':str})\n df = df.set_index('code')\n codes = df[df['是否ST过'] == 1].index.tolist()\n total = len(codes)\n# all_dates = get_trading_dates(start, end)\n \n daily = DB_CONN['daily']\n \n excel_name = 'data/st_info.xlsx'\n for i in range(4):\n if i == 0:\n all_dates = get_trading_dates('2015-01-01', '2015-12-31')\n elif i == 1:\n all_dates = get_trading_dates('2016-01-01', '2016-12-31')\n if i == 2:\n all_dates = get_trading_dates('2017-01-01', '2017-12-31')\n elif i == 3:\n all_dates = get_trading_dates('2018-01-01', '2018-09-30')\n \n \n print('数据读取中')\n df = pd.read_excel(excel_name, i, header=0, dtype={'code':str})\n df = df.set_index(['code','state'])\n df.columns = df.columns.astype(np.datetime64)\n df.columns = df.columns.to_period('D')\n df.columns = df.columns.astype('str')\n print('数据读取完毕')\n \n \n for j, code in enumerate(codes):\n update_requests = []\n for date in all_dates:\n try:\n st_state = df.xs([code])[date]['是否ST']\n sst_state = df.xs([code])[date]['是否*ST']\n if (st_state == '否') and (sst_state == '否'):\n is_st_flag = False\n else:\n is_st_flag = True\n \n update_requests.append(\n UpdateOne(\n {'code':code, 'date':date, 'index':False},\n {'$set':{'is_st':is_st_flag}}\n )\n )\n except:\n print('something is wrong, code : %s, date : %s' % (code, date))\n \n if len(update_requests)>0:\n update_result = daily.bulk_write(update_requests, ordered=False)\n print('第%s年填充进度: %s/%s, 字段名: is_st,数据集:%s,插入:%4d条,更新:%4d条' %\n (i+1, j+1, total, 'daily', update_result.upserted_count, update_result.modified_count), flush=True)\n \n \n\ndef fill_high_and_low_price_between(start, end):\n \n \"\"\"\n for code in codes:\n timeToMarket = basic.find()\n \n for \n \"\"\"\n# st_mark = ['st', 'ST', '*st', '*ST']\n codes = ts.get_stock_basics().index.tolist()\n _df = pd.read_excel('data/stock_basic.xlsx', header=0, dtype={'code':str})\n _df = _df.set_index('code')\n st_codes = _df[_df['是否ST过'] == 1].index.tolist()\n total = len(codes)\n error_code = []\n\n for i,code in enumerate(codes):\n try:\n timeToMarket = DB_CONN['basic'].find_one({'code':code}, \n 
projection={'code':True, 'timeToMarket':True, '_id':False})['timeToMarket']\n except:\n error_code.append(code)\n continue\n \n daily_cursor = DB_CONN['daily'].find(\n {'code':code, 'date':{'$lte': end, '$gte': timeToMarket}, 'index':False},\n projection={'code':True, 'date':True, 'pre_close':True, '_id':False})\n \n update_requests = []\n \n for j,daily in enumerate(daily_cursor):\n date = daily['date']\n \n try:\n pre_close = daily['pre_close']\n except:\n if (j == 0) & (timeToMarket != date):\n pass\n# print('code: %s, time: %s, 数据初始日没有pre_close' % (code, date))\n elif timeToMarket == date:\n# print('code: %s, date: %s' % (code, date))\n issueprice = DB_CONN['basic'].find_one({'code':code},\n projection={'issueprice':True, '_id':False})['issueprice']\n \n high_limit = np.round(np.round(issueprice * 1.2, 2) * 1.2, 2)\n low_limit = np.round(np.round(issueprice * 0.8, 2) * 0.8, 2)\n \n update_requests.append(\n UpdateOne({'code':code, 'date':date, 'index':False},\n {'$set':{'high_limit':high_limit, 'low_limit':low_limit}},\n upsert=True))\n else:\n print('code: %s, time: %s, ipo_date: %s, 请速查原因' % (code, date, timeToMarket))\n error_code.append(code)\n continue\n \n# if date < '2016-08-09':\n# _date = '2016-08-09'\n# else:\n# _date = date\n# \n# try:\n# name = DB_CONN['basic'].find_one({'code':code, 'date':_date},\n# projection={'name':True, '_id':False})['name']\n# last_name = name\n# except:\n# if j == 0:\n# name = DB_CONN['basic'].find_one({'code':code},\n# projection={'name':True, '_id':False})['name']\n# last_name = name\n# else:\n## print('code: %s, date: %s' % (code, date))\n# name = last_name\n \n# if timeToMarket == date:\n# \n# issueprice = DB_CONN['basic'].find_one({'code':code},\n# projection={'issueprice':True, '_id':False})['issueprice']\n# \n# high_limit = np.round(np.round(issueprice * 1.2, 2) * 1.2, 2)\n# low_limit = np.round(np.round(issueprice * 0.8, 2) * 0.8, 2)\n\n# if daily['is_st'] :\n if code in st_codes:\n st_flag = DB_CONN['daily'].find_one({'code':code, 'date':date, 'index':False})['is_st']\n if st_flag:\n high_limit = np.round(pre_close * 1.05, 2)\n low_limit = np.round(pre_close * 0.95, 2)\n \n else:\n high_limit = np.round(pre_close * 1.1, 2)\n low_limit = np.round(pre_close * 0.9, 2)\n \n update_requests.append(\n UpdateOne({'code':code, 'date':date, 'index':False},\n {'$set':{'high_limit':high_limit, 'low_limit':low_limit}},\n upsert=True))\n \n if len(update_requests)>0:\n update_result = DB_CONN['daily'].bulk_write(update_requests, ordered=False)\n print('涨跌停计算, 进度: (%s/%s), code:%s, 数据集:%s, 插入:%4d条, 更新:%4d条' %\n (i+1, total, code, 'daily', update_result.upserted_count, update_result.modified_count), flush=True)\n \n# print('stock: %s high low limit complish, 进度: (%s/%s)' % (code, i+1, total), flush=True)\n\n# main funciton\nif __name__ == '__main__':\n daily_col = DB_CONN['daily']\n if 'code_1_index_1' not in daily_col.index_information().keys():\n daily_col.create_index(\n [('code', ASCENDING), ('index', ASCENDING)]\n )\n start = '2015-01-01'\n end = '2018-09-30'\n tic = time.process_time()\n fixing_is_st(start, end)\n# fill_issueprice_and_timeToMarket()\n fill_high_and_low_price_between(start, end)\n toc = time.process_time()\n delta = toc - tic\n print(delta)" ]
[ [ "numpy.round", "pandas.read_excel" ] ]
gshowalt/VirusPopModel
[ "8d41294fa06a44e8fa22ef390d6db14fba7818a1" ]
[ "Code/CarbonEquiv_Talmy.py" ]
[ "\n# importing all modules\nimport numpy as np\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib import cm\nimport matplotlib.tri as tri\nfrom matplotlib.colors import LogNorm\nimport matplotlib.patches as mpatches\nfrom matplotlib.ticker import LogFormatter \n\nfrom collections import Counter\n\nfrom functools import wraps\n\nimport csv\nimport sys\n\nimport itertools\nfrom itertools import islice, cycle, chain\n\nimport scipy as sp\nfrom scipy.interpolate import griddata\nfrom scipy import interpolate\nfrom scipy.integrate import odeint\nfrom scipy.stats import pareto\nfrom scipy.stats import loguniform\n\nimport seaborn as sns\nimport pandas as pd\n\nimport statistics as stats\nimport lhsmdu\n\nfrom math import nan\n\nfrom SALib.sample import saltelli, latin, ff\nfrom SALib.analyze import sobol\n\nimport random\n\n\n# define the function which includes the differential equations\n# this was adapted from the leak/lyse experiment so I just left that in and set it to a final value later\n\ndef f2(s,t, leak, lyse, temp):\n \n # first define the relative contact rate (RCR) and brine concentrating factor (BCF) by temp\n if temp < -1:\n RCR = 0.0716*temp**4 + 2.9311*temp**3 + 34.108*temp**2 + 45.826*temp + 3.5125 #Fit from Wells and Deming, 2006\n BCF = -0.0106 * temp **2 - 0.519 * temp + 0.2977\n sal = 32 * BCF\n else:\n RCR = 1\n sal = 32\n \n # these are our scaling factors for the temperature-dependent parameter distributions\n mux = 1 # for growth rate\n betx = 1 # for burst size\n phix = 1e-5 # for adsorption rate\n gamx = 1 # for lytic fraction\n \n # Temp-dependent parameter distribution for burst size\n beta = betx*(0.0064 * temp**3 - 0.3047 * temp ** 2 + 0.7701 * temp + 93.605)\n # also parameterized as a curve with a standard deviation (std) for other experiments\n # but here was simply a set curve for reproducibility\n \"\"\" beta_std = 0.0095 * temp **3 - 0.5184 * temp**2 + 2.2456 * temp + 126.59\n if beta_std < 0:\n beta_std = 0.\n beta = np.random.normal(beta_mu, beta_std)\"\"\"\n\n # Temp-dependent parameter distribution for growth rate \n # (we had two different distributions, but I went with the exponential one)\n # mu = mux*(2e-5*temp**3 + 0.0008 * temp **2 + 0.0091 * temp + 0.0386)\n # mu = 3e-6*temp**4 + 0.0001*temp**3+0.0014*temp**2 + 0.0092 * temp +0.0333\n mu = 0.0441*np.exp(0.4991*temp) \n \"\"\"mu_std = 0.1*2e-5*temp**3 + 0.0009 * temp **2 + 0.0144 * temp + 0.0818\n if mu_std<0:\n mu_std = 0.001\n mu = np.random.normal(mu_mu, mu_std)\"\"\"\n\n # Temp-dependent parameter distribution for adsorption rate \n # I also tried it as a function of salinity (immediately below), but chose temp for consistency\n #phi = phix * -1e-11*sal**2 +4e-9*sal - 9e-8\n phi = phix * (6e-13 * temp **5 - 2e-11 * temp ** 4 + 1e-10 * temp ** 3 + 3e-9 * temp ** 2 - 3e-8 * temp + 5e-8)\n \"\"\"phi_std = -2e-11*sal**2 + 4e-9*sal - 9e-8\n if phi_std < 0:\n phi_std = 0\n phi = np.random.normal(phi_mu, phi_std)\"\"\"\n \n # set conditions for when curve goes below zero\n if mu <= 0:\n mu = 0.000\n if beta < 0:\n beta = 1\n if phi < 0:\n phi = 1e-15\n \n # now we want to scale adsorption rate by RCR to incorporate the sea ice \n phi = phi * RCR \n\n \n # SET PARAMETERS\n alpha = 1.2e-7*3**((temp-23)/10)#4.2e-7 at +8, or 1.2e-7 at lower temps, at -5 --> mu = 0.25/day = 0.01/hr = 1e-8\n # alpha is a coefficient that we'd like to change with temperature? 
Or change eta?\n #nutrient transfer coefficient to bacteria (ug/cell * hr)\n Q = 0.022\n #half saturation constant (ug/mL)\n d = 1e-8\n #constant of bacterial death (1/hr)\n m = 1e-6\n #constant of viral decay (1/hr)\n g = leak\n #POM transfer coefficient from bacteria (ug/cell*hr)\n n = lyse\n #POM transfer coefficient from viral lysis ug/[burst]cell\n #gamma is a lysogeny value\n gamma = 1 #-1/temp #*mu\n \n # set up solution matrix\n N = s[0]\n B = s[1]\n V = s[2]\n P = s[3]\n \n #systems of equations below\n \n dNdt = - alpha * (N / (N + Q)) * B + g * (alpha * (N/(N+Q))*B) + (n * 1e-7 * (gamma) * phi * V * B)\n if N < 0:\n N = 0\n dBdt = (mu) * (N/(Q + N)) * B - gamma * phi * V * B - d*B\n if B < 1:\n B = 1\n dVdt = gamma*beta * B * phi*V - phi * V * B - m*V\n if V < 1:\n V = 1\n #dPdt = (g * (0.0083*1e-7))*B + (n * 1e-7 * phi * V * B*RCR) + 1e-10*m*V + 1.0e-7*d*B - (P/(P+Q))*alpha * B\n dPdt = g * alpha * (N/ (N+Q))*B + n * 1e-7 * (gamma)*phi*B*V\n \n # according to Jover, 2014 - virus has 0.02 to 0.05 fg carbon/virion => translate into ug Carbon = 5e-11\n VCarbonEQ = 5e-11\n BCarbonEQ = 1e-7 #from Bionumbers\n \n # building the carbon equivalent for viruses, lysate as per Talmy et al 2019\n rv = 90 #virus radius (nm)\n Qv = (41 * (rv - 2.5)**3 + 130*(7.5*(rv)**2 - 18.74 * rv + 15.63)) * (10e6/(6.022 * 10**23)) # virus carbon eq\n phiEQ = (phi)/(Qv) \n Qh = 1e-7\n etav = beta * (Qv/Qh)\n \n TotalVCarbon = (phiEQ * (gamma) * (V*VCarbonEQ) * (B*BCarbonEQ))\n VirusCarbon = etav * (phiEQ * (gamma) * (V*VCarbonEQ) * (B*BCarbonEQ))\n LysateCarbon = (1-etav)*(phiEQ * (gamma) * (V*VCarbonEQ) * (B*BCarbonEQ))\n LeakCarbon = g * (alpha * (N/(N+Q))*B)\n\n \n #print (mu, beta, phi, gamma)\n return [dNdt, dBdt, dVdt, dPdt, TotalVCarbon, VirusCarbon, LysateCarbon, LeakCarbon]\n\n\n# define time, temperature scale\ntime = 5000\ntemp_list = [-12.5,-10, -8, -6, -4, -2]\nt = np.linspace(1,time,1000)\n\n# set up empty matricies\nDOMX = []\nDOMA = []\nDOMB = []\nDOMC = []\nDOM1 = []\nDOM10 = []\nDOM100 = []\n\nRCRlist = []\nMulist = []\nendvals1 = []\nendvals2 = []\nendvals3 = []\nendvals4 = []\nBurstlist = []\nAdsorplist = []\n\ncount = 0\nplt.rcParams[\"font.family\"] = \"sans-serif\"\nfig1 = plt.figure(figsize=(20,15))\nfig1.tight_layout()\nplt.rcParams.update({'font.size': 15})\n\nfor xx in temp_list:\n temp = xx\n count +=1\n mu = 0.0441*np.exp(0.4991*temp)\n gamma = 1\n #print (\"gamma is:\", gamma, \"and mu is:\", mu)\n if temp < -1:\n RCR = 0.0716*temp**4 + 2.9311*temp**3 + 34.108*temp**2 + 45.826*temp + 3.5125 #Fit from Wells and Deming, 2006\n BCF = -0.0106 * temp **2 - 0.519 * temp + 0.2977\n sal = 32 * BCF\n else:\n BCF = 1\n sal = 32\n \n s0=[0.12*BCF,1e4*BCF, 1e5*BCF,0,0,0,0,0]\n s = odeint(f2,s0,t, args = (0.4,0.99, temp))\n xend.append(sum(s[:,3]))\n \n \n y1 = s[:,4]/(0.12)\n y2 = s[:,5]/(0.12)\n y3 = s[:,6]/(0.12)\n y4 = s[:,7]/(0.12)\n \n \n plt.subplot(3, 3, count)\n\n \n colors1 = ['cadetblue', '#FF6F61'] #, 'darkblue']\n plt.stackplot(t,y2,y3, colors = colors1,labels=['To Virus','To Lysate'])\n plt.legend(loc='lower right')\n\n plt.xlabel('Temperature: {} (˚C)'.format(temp))\n plt.yscale('log')\n plt.ylabel('% Initial Nutrient')\n\n\n \n # take last value of each returned number for the temp-dependent plot \n endvals1.append(y1[-1])\n endvals2.append(y2[-1])\n endvals3.append(y3[-1])\n endvals4.append(y4[-1])\n \n # make lists of calculated temp-dependent parameters if we want to plot against them alter\n RCRlist.append(RCR)\n Mulist.append(mu)\n beta = 1*(0.0064 * temp**3 - 0.3047 
* temp ** 2 + 0.7701 * temp + 93.605)\n Burstlist.append(beta)\n phi = RCR* 1 * (6e-13 * temp **5 - 2e-11 * temp ** 4 + 1e-10 * temp ** 3 + 3e-9 * temp ** 2 - 3e-8 * temp + 5e-8)\n Adsorplist.append(phi)\n\n\n\nplt.subplots_adjust(hspace = 1)\nfig1.suptitle(\"Cumulative organic carbon recycled into Virions or Lysate \",fontsize=15)\n\n# Plot as a funciton of temperature\nplt.rcParams[\"font.family\"] = \"sans-serif\"\nplt.rcParams.update({'font.size': 20})\nfig2 = plt.figure(figsize=(10,5))\nfig2.tight_layout()\n\n\nendvals1_b = [i/max(endvals1) for i in endvals1]\nendvals2_b = [i/max(endvals2) for i in endvals2]\nendvals3_b = [i/max(endvals3) for i in endvals3]\nendvals4_b = [i/max(endvals4) for i in endvals4]\n\n#ax1 = plt.stackplot(temp_list, endvals2_b, endvals3, colors = colors1) #, labels=['To Virus','To Lysate', 'Cell exudate'])\n#ax1 = plt.plot(temp_list, Burstlist)\nplt.plot(temp_list,endvals2_b, c = 'cadetblue', marker = 'o', markeredgecolor='white', markersize=15, label='to Virions')\nplt.plot(temp_list, endvals3_b, c = '#FA7268', marker = 'o', markeredgecolor='white', markersize=15, label='to Lysate') \n\nplt.xlabel('Temperature (˚C)')\nplt.ylabel('Carbon Flow (Relative to Maximum)')\nplt.legend(loc='lower right')\nfig2.suptitle(\"Cumulative organic carbon recycled into \\nVirions or Lysate as a function of temperature\\n\",fontsize=15)\n\n\n\n\n# In[88]:\n#fig1.savefig('CE_Grid_withRCR_runaway.jpeg', bbox_inches=\"tight\", dpi=300,transparent=True)\n#fig2.savefig('CE_Temp_noRCR_line.jpeg', bbox_inches=\"tight\", dpi=300,transparent=True)\n\n\n\n" ]
[ [ "scipy.integrate.odeint", "matplotlib.pyplot.legend", "matplotlib.pyplot.stackplot", "matplotlib.pyplot.figure", "matplotlib.pyplot.yscale", "numpy.exp", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.subplot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "numpy.linspace", "matplotlib.pyplot.xlabel" ] ]
norheim/pextant
[ "f4235719279c0e6f178ae1e0f8b1ea3346533915" ]
[ "pextant/solvers/SEXTANTsolver.py" ]
[ "from pextant.lib.geoshapely import GeoPolygon, LONG_LAT\nimport numpy as np\nimport csv\n\nclass SEXTANTSolver(object):\n def __init__(self, environmental_model, cost_function, viz):\n self.env_model = environmental_model\n self.cost_function = cost_function\n self.viz = viz\n self.searches = []\n\n def solve(self, start_point, end_point):\n pass\n\n def solvemultipoint(self, waypoints):\n search_list = sextantSearchList(waypoints)\n for i in range(len(waypoints) - 1):\n search_result = self.solve(waypoints[i], waypoints[i + 1])\n search_list.append(search_result)\n return search_list, search_list.raw(), search_list.itemssrchd()\n\nclass sextantSearchList(object):\n def __init__(self, points):\n self.startpoint = points[0]\n self.endpoint = points[-1]\n self.waypoints = points\n self.list = []\n self.rawpoints = []\n\n def addresult(self, raw, nodes, coordinates, expanded_items):\n self.list.append(sextantSearch(raw, nodes, coordinates, expanded_items))\n\n def append(self, sextantsearch):\n self.list.append(sextantsearch)\n\n def raw(self):\n result = []\n for search in self.list:\n if search == False:\n return None\n result += search.raw\n return np.array(result)\n\n def coordinates(self):\n result = []\n for search in self.list:\n if type(search) == bool:\n return None\n result += search.coordinates.to(LONG_LAT).transpose().tolist()\n return GeoPolygon(LONG_LAT, *np.array(result).transpose())\n\n def itemssrchd(self):\n result = []\n for search in self.list:\n if type(search) == bool:\n return None\n result += search.expanded_items\n return np.array(result)\n\n def tojson(self, save=False):\n return [elt.tojson() for elt in self.list]\n\n def tocsv(self, filepath=None):\n csvlist = [elt.tocsv() for elt in self.list]\n rows = [['isStation', 'x', 'y', 'z', 'distanceMeters', 'energyJoules', 'timeSeconds']]\n for row in csvlist:\n rows += row\n if filepath:\n with open(filepath, 'wb') as csvfile:\n writer = csv.writer(csvfile)\n for row in rows:\n writer.writerow(row)\n return csvlist\n\n\nclass sextantSearch(object):\n def __init__(self, raw, nodes, coordinates, expanded_items):\n self.namemap = {\n 'time': ['timeList','totalTime'],\n 'pathlength': ['distanceList','totalDistance'],\n 'energy': ['energyList','totalEnergy']\n }\n #self.searches = []\n self.nodes = nodes\n self.raw = raw\n self.npraw = np.array(raw).transpose()\n self.coordinates = coordinates\n self.expanded_items = expanded_items\n\n def tojson(self):\n out = {}\n coordinates = self.coordinates.to(LONG_LAT).transpose().tolist()\n out[\"geometry\"] = {\n 'type': 'LineString',\n 'coordinates': coordinates\n }\n results = {}\n for k, v in self.namemap.items():\n results.update({v[0]:[],v[1]:0})\n for i, mesh_srch_elt in enumerate(self.nodes):\n derived = mesh_srch_elt.derived\n for k, v in derived.items():\n results[self.namemap[k][0]].append(v)\n for k, v in self.namemap.items():\n results[v[1]] = sum(results[v[0]])\n out[\"derivedInfo\"] = results\n return out\n\n def tocsv(self, coordstype=LONG_LAT):\n sequence = []\n coords = self.coordinates.to(coordstype).transpose().tolist()\n for i, mesh_srch_elt in enumerate(self.nodes):\n if i != 0:\n row_entry = [i==1 or i==len(coords)-1] #True if it's the first or last entry\n row_entry += coords[i] + [mesh_srch_elt.mesh_element.z]\n derived = mesh_srch_elt.derived\n row_entry += [derived['pathlength'], derived['time'], derived['energy']]\n sequence += [row_entry]\n return sequence\n" ]
[ [ "numpy.array" ] ]
Willyoung2017/doc-qa
[ "7ee02218952b0b9db63bc82b3895f743cdbd8f22" ]
[ "docqa/elmo/ablate_elmo_sub_filter.py" ]
[ "import argparse\nfrom datetime import datetime\n\nfrom tensorflow.contrib.keras.python.keras.initializers import TruncatedNormal\n\nfrom docqa import trainer\nfrom docqa.data_processing.qa_training_data import ContextLenKey\nfrom docqa.dataset import ClusteredBatcher\nfrom docqa.encoder import DocumentAndQuestionEncoder, SingleSpanAnswerEncoder, DocumentAndQuestionEncoderWithSubstring\nfrom docqa.evaluator import LossEvaluator, SpanEvaluator\nfrom docqa.elmo.elmo import ElmoLayer\nfrom docqa.elmo.lm_qa_models import AttentionWithElmo, SquadContextConcatSkip\nfrom docqa.model_dir import ModelDir\nfrom docqa.nn.attention import BiAttention, StaticAttentionSelf\nfrom docqa.nn.embedder import FixedWordEmbedder, CharWordEmbedder, LearnedCharEmbedder, LearnedSubstringEmbedder, \\\n FilteredFixedWordEmbedder\nfrom docqa.nn.layers import FullyConnected, ChainBiMapper, NullBiMapper, MaxPool, Conv1d, SequenceMapperSeq, \\\n VariationalDropoutLayer, ResidualLayer, ConcatWithProduct, MapperSeq, DropoutLayer\nfrom docqa.nn.recurrent_layers import CudnnGru\nfrom docqa.nn.similarity_layers import TriLinear\nfrom docqa.nn.span_prediction import BoundsPredictor\nfrom docqa.squad.squad_data import SquadCorpus, DocumentQaTrainingData\n\n\ndef main():\n parser = argparse.ArgumentParser(\"Train our ELMo model on SQuAD\")\n parser.add_argument(\"output_dir\")\n parser.add_argument(\"--dim\", type=int, default=90)\n parser.add_argument(\"--l2\", type=float, default=0)\n parser.add_argument(\"--mode\", choices=[\"input\", \"output\", \"both\", \"none\"], default=\"both\")\n parser.add_argument(\"--top_layer_only\", action=\"store_true\")\n #parser.add_argument(\"--combination\", choices=[\"x, y\", \"x * y\", \"x, y, x * y\"], default=\"x, y\")\n parser.add_argument(\"--use_substring\", type=str, default=\"None\")\n parser.add_argument(\"--sub_dim\", type=int, default=50)\n args = parser.parse_args()\n print(args)\n out = args.output_dir + \"-\" + datetime.now().strftime(\"%m%d-%H%M%S\")\n\n dim = args.dim\n recurrent_layer = CudnnGru(dim, w_init=TruncatedNormal(stddev=0.05))\n\n params = trainer.TrainParams(trainer.SerializableOptimizer(\"Adadelta\", dict(learning_rate=1.0)),\n ema=0.999, max_checkpoints_to_keep=2, async_encoding=10,\n num_epochs=24, log_period=30, eval_period=1200, save_period=1200,\n best_weights=(\"dev\", \"b17/text-f1\"),\n eval_samples=dict(dev=None, train=8000))\n\n lm_reduce = MapperSeq(\n ElmoLayer(args.l2, layer_norm=False, top_layer_only=args.top_layer_only),\n DropoutLayer(0.5),\n )\n CharEmbedderCls, EncoderCls = (LearnedCharEmbedder, DocumentAndQuestionEncoder) if args.use_substring == \"None\" \\\n else (LearnedSubstringEmbedder, DocumentAndQuestionEncoderWithSubstring)\n charEmbedder = CharEmbedderCls(word_size_th=14, char_th=20, char_dim=args.sub_dim, init_scale=0.05, force_cpu=True)\n if args.use_substring != None:\n charEmbedder._load_substring_vocab(args.use_substring)\n\n final_sub_dim = 100 #if args.combination == \"x, y\" else 300\n\n model = AttentionWithElmo(\n #combination=args.combination,\n encoder=EncoderCls(SingleSpanAnswerEncoder()),\n lm_model=SquadContextConcatSkip(),\n append_before_atten=(args.mode == \"both\" or args.mode == \"output\"),\n append_embed=(args.mode == \"both\" or args.mode == \"input\"),\n max_batch_size=128,\n word_embed=FilteredFixedWordEmbedder(vec_name=\"glove.840B.300d\", word_vec_init_scale=0, learn_unk=True, cpu=True),\n char_embed=CharWordEmbedder(\n charEmbedder,\n MaxPool(Conv1d(final_sub_dim, 5, 0.8)),\n shared_parameters=True\n ),\n 
embed_mapper=SequenceMapperSeq(\n VariationalDropoutLayer(0.8),\n recurrent_layer,\n VariationalDropoutLayer(0.8),\n ),\n lm_reduce=None,\n lm_reduce_shared=lm_reduce,\n per_sentence=False,\n memory_builder=NullBiMapper(),\n attention=BiAttention(TriLinear(bias=True), True),\n match_encoder=SequenceMapperSeq(FullyConnected(dim * 2, activation=\"relu\"),\n ResidualLayer(SequenceMapperSeq(\n VariationalDropoutLayer(0.8),\n recurrent_layer,\n VariationalDropoutLayer(0.8),\n StaticAttentionSelf(TriLinear(bias=True), ConcatWithProduct()),\n FullyConnected(dim * 2, activation=\"relu\"),\n )),\n VariationalDropoutLayer(0.8)),\n predictor = BoundsPredictor(ChainBiMapper(\n first_layer=recurrent_layer,\n second_layer=recurrent_layer\n ))\n )\n\n batcher = ClusteredBatcher(45, ContextLenKey(), False, False)\n data = DocumentQaTrainingData(SquadCorpus(), None, batcher, batcher)\n\n with open(__file__, \"r\") as f:\n notes = f.read()\n notes = str(sorted(args.__dict__.items(), key=lambda x:x[0])) + \"\\n\" + notes\n\n trainer.start_training(data, model, params,\n [LossEvaluator(), SpanEvaluator(bound=[17], text_eval=\"squad\")],\n ModelDir(out), notes)\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "tensorflow.contrib.keras.python.keras.initializers.TruncatedNormal" ] ]
Duy-Vu/stock-network
[ "3e84cfc581cd07001e86c20101c91c2f8910deb2" ]
[ "utils.py" ]
[ "import numpy as np\r\n\r\n\r\ndef clean_data(df, out_df_dir=\"\"):\r\n df.dropna(axis=1, inplace=True)\r\n\r\n if out_df_dir:\r\n df.to_csv(out_df_dir)\r\n\r\n return df\r\n\r\n\r\n# Calculate log change of daily price\r\ndef log_change(series):\r\n return np.log(series[1] / series[0])\r\n\r\n\r\n# Calculate correaltion\r\ndef calculate_cor(df, start, end):\r\n return df[start:end].rolling(\r\n window=2,\r\n min_periods=2\r\n ).apply(\r\n log_change,\r\n raw=True\r\n ).corr(method=\"pearson\")\r\n\r\n# Calculate profit\r\ndef take_profit(price, start, end):\r\n return price.iloc[end]/price.iloc[start] - 1" ]
[ [ "numpy.log" ] ]
jet-universe/particle_transformer
[ "68a7fbcd7d39a64b753251064f120462400895a1" ]
[ "networks/example_ParticleTransformer.py" ]
[ "import os\nimport torch\nfrom weaver.utils.logger import _logger\nfrom weaver.utils.import_tools import import_module\n\nParticleTransformer = import_module(\n os.path.join(os.path.dirname(__file__), 'ParticleTransformer.py'), 'ParT').ParticleTransformer\n\n\nclass ParticleTransformerWrapper(torch.nn.Module):\n def __init__(self, **kwargs) -> None:\n super().__init__()\n self.mod = ParticleTransformer(**kwargs)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'mod.cls_token', }\n\n def forward(self, points, features, lorentz_vectors, mask):\n return self.mod(features, v=lorentz_vectors, mask=mask)\n\n\ndef get_model(data_config, **kwargs):\n\n cfg = dict(\n input_dim=len(data_config.input_dicts['pf_features']),\n num_classes=len(data_config.label_value),\n # network configurations\n pair_input_dim=4,\n embed_dims=[128, 512, 128],\n pair_embed_dims=[64, 64, 64],\n num_heads=8,\n num_layers=8,\n num_cls_layers=2,\n block_params=None,\n cls_block_params={'dropout': 0, 'attn_dropout': 0, 'activation_dropout': 0},\n fc_params=[],\n activation='gelu',\n # misc\n trim=True,\n for_inference=False,\n )\n cfg.update(**kwargs)\n _logger.info('Model config: %s' % str(cfg))\n\n model = ParticleTransformerWrapper(**cfg)\n\n model_info = {\n 'input_names': list(data_config.input_names),\n 'input_shapes': {k: ((1,) + s[1:]) for k, s in data_config.input_shapes.items()},\n 'output_names': ['softmax'],\n 'dynamic_axes': {**{k: {0: 'N', 2: 'n_' + k.split('_')[0]} for k in data_config.input_names}, **{'softmax': {0: 'N'}}},\n }\n\n return model, model_info\n\n\ndef get_loss(data_config, **kwargs):\n return torch.nn.CrossEntropyLoss()\n" ]
[ [ "torch.nn.CrossEntropyLoss" ] ]
jessijzhao/fairscale
[ "d6a8fc6dadc5d5ab4e3ee3f42f8cd570d70d30ec" ]
[ "benchmarks/pipe.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport argparse\nimport logging\nimport math\nimport os\nimport time\nimport warnings\n\nfrom benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm\nimport torch\nfrom torch.distributed import rpc\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data import DataLoader\nimport torchtext\nfrom torchtext.data.utils import get_tokenizer\n\nfrom fairscale.nn import Pipe\nfrom fairscale.nn.model_parallel import initialize_model_parallel\nfrom fairscale.nn.model_parallel.initialize import get_data_parallel_group, get_pipeline_parallel_group\nfrom fairscale.nn.pipe import LazyModule, pipe\nfrom fairscale.optim import GradScaler\nfrom fairscale.optim.oss import OSS\nfrom fairscale.utils.testing import dist_init, get_worker_map\n\ntry:\n from fairscale.optim import Adam # type: ignore\n\n can_benchmark = True\nexcept ImportError:\n from torch.optim import Adam # type: ignore\n\n can_benchmark = False\n\n\ndef init_random_seed(seed: int):\n import numpy\n\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n numpy.random.seed(seed)\n\n\nPIPE_CHUNKS = 2\niteration_count = 0\n\n\nclass EmbeddingLayer(nn.Embedding):\n def __init__(self, ntoken, ninp, initrange):\n super().__init__(ntoken, ninp)\n self.ninp = ninp\n self.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, src):\n return super().forward(src) * math.sqrt(self.ninp)\n\n\nclass PositionalEncodingLayer(nn.Module):\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncodingLayer, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x):\n x = x + self.pe[: x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerDecoderLayer(nn.TransformerEncoderLayer):\n \"\"\"Though this class inherits from torch.nn.TransformerEncoderLayer,\n it functions as a decoder in this model\"\"\"\n\n def __init__(self, ninp, nhead, nhid, droupout):\n super().__init__(ninp, nhead, nhid, droupout)\n self.src_mask = None\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float(\"-inf\")).masked_fill(mask == 1, float(0.0))\n return mask\n\n def forward(self, src):\n global iteration_count\n iteration_count += 1\n # if iteration_count == 196:\n # dump_cuda_tensors()\n\n if self.src_mask is None or self.src_mask.size(0) != len(src):\n device = src.device\n mask = self._generate_square_subsequent_mask(len(src)).to(device)\n self.src_mask = mask\n\n return super().forward(src, self.src_mask)\n\n\nclass LinearLayer(nn.Linear):\n def __init__(self, ninp, ntoken, initrange):\n super().__init__(ninp, ntoken)\n self.bias.data.zero_()\n self.weight.data.uniform_(-initrange, initrange)\n\n\nclass TransformerLMSequential(nn.Sequential):\n \"\"\"A small language model based on the design of GPT-2 using nn.Sequential\n for compatability with Pipe\"\"\"\n\n def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):\n layers = [\n 
EmbeddingLayer(ntokens, ninp, initrange),\n PositionalEncodingLayer(ninp, dropout),\n ]\n for _ in range(ndecoder):\n layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))\n\n layers.append(LinearLayer(ninp, ntokens, initrange))\n super(TransformerLMSequential, self).__init__(*layers)\n\n\ndef get_data(device):\n with warnings.catch_warnings(record=True) as fjldska:\n TEXT = torchtext.data.Field(\n tokenize=get_tokenizer(\"basic_english\"), init_token=\"<sos>\", eos_token=\"<eos>\", lower=True\n )\n train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(TEXT)\n TEXT.build_vocab(train_txt)\n ntokens = len(TEXT.vocab.stoi)\n\n batch_size = 20\n eval_batch_size = 10\n train_data = batchify(train_txt, batch_size, TEXT, device)\n val_data = batchify(val_txt, eval_batch_size, TEXT, device)\n test_data = batchify(test_txt, eval_batch_size, TEXT, device)\n\n return ntokens, train_data, val_data, test_data\n\n\ndef batchify(data, bsz, TEXT, device):\n data = TEXT.numericalize([data.examples[0].text])\n nbatch = data.size(0) // bsz\n data = data.narrow(0, 0, nbatch * bsz)\n data = data.view(bsz, -1).t().contiguous()\n return data.to(device)\n\n\ndef get_batch(source, i, bptt):\n seq_len = min(bptt, len(source) - 1 - i)\n data = source[i : i + seq_len]\n target = source[i + 1 : i + 1 + seq_len].view(-1)\n return data, target\n\n\ndef make_model(args, device, ntokens):\n ninp = 2048 # embedding dimension\n nhid = 2048 # the dimension of the feedforward network model in nn.TransformerEncoder\n nhead = 32 # the number of heads in the multiheadattention models\n dropout = 0\n initrange = 0.1\n ndecoder = args.num_decoder_layers\n\n if args.lazy_construction:\n layers = [\n LazyModule(lambda: EmbeddingLayer(ntokens, ninp, initrange)),\n LazyModule(lambda: PositionalEncodingLayer(ninp, dropout)),\n ]\n for _ in range(ndecoder):\n layers.append(LazyModule(lambda: TransformerDecoderLayer(ninp, nhead, nhid, dropout)))\n\n layers.append(LazyModule(lambda: LinearLayer(ninp, ntokens, initrange)))\n model = layers\n else:\n model = TransformerLMSequential(ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)\n\n criterion = nn.CrossEntropyLoss()\n lr = 0.01 # learning rate\n\n def make_adam(model):\n if args.ddp_zero:\n return OSS(params=model.parameters(), optim=Adam, group=get_data_parallel_group(), lr=lr)\n else:\n return Adam(model.parameters(), lr=lr)\n\n optimizer = make_adam\n scaler = GradScaler()\n\n return model, criterion, optimizer, scaler\n\n\ndef get_tensors_by_size_bucket():\n from collections import defaultdict\n import gc\n\n size_buckets = defaultdict(int)\n for obj in gc.get_objects():\n if not isinstance(obj, torch.Tensor):\n continue\n if obj.device.type == \"cuda\":\n size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1\n\n return size_buckets\n\n\ndef dump_size_buckets(size_buckets, prefix=\"\"):\n from functools import reduce\n import operator\n\n total = 0\n for key, value in size_buckets.items():\n this = reduce(operator.mul, key) * value\n total += this\n print(prefix + f\"{key} : {value}, {this}\")\n\n print(prefix + f\"total = {total}\")\n\n\nlast_size_buckets = None\nonce = True\n\n\ndef safe_rank():\n try:\n return torch.distributed.get_rank()\n except AssertionError:\n return 0\n\n\ndef check_size_buckets():\n global last_size_buckets\n global once\n size_buckets = get_tensors_by_size_bucket()\n if last_size_buckets is not None:\n if size_buckets != last_size_buckets:\n print(f\"difference is oustanding tensors: {safe-rank()}\")\n 
dump_size_buckets(last_size_buckets, \"old: \")\n dump_size_buckets(size_buckets, \"new: \")\n if once:\n print(f\"dumping buckets for: {safe_rank()}\")\n dump_size_buckets(last_size_buckets, \"old: \")\n dump_size_buckets(size_buckets, \"new: \")\n once = False\n else:\n print(f\"size buckets none on {safe_rank()}\")\n last_size_buckets = size_buckets\n\n\ndef dump_cuda_tensors():\n print(f\"dumping cuda tensors...\")\n from functools import reduce\n import gc\n import operator\n\n for obj in gc.get_objects():\n if not isinstance(obj, torch.Tensor):\n continue\n if obj.device.type == \"cuda\":\n size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1\n\n print(f\"outstanding cuda tensors:\")\n total = 0\n for key, value in size_buckets.items():\n this = reduce(operator.mul, key) * value\n total += this\n print(f\"{key} : {value}, {this}\")\n print(f\"total size = {total}\")\n\n import pprint\n\n pprint.pprint(torch.cuda.memory_stats())\n\n\ndef train(lm_dataloader, model, criterion, optimizer, vocab_size, args):\n model.train()\n from functools import reduce\n import operator\n\n num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))\n if model.group:\n total = torch.Tensor([num_params])\n if torch.cuda.is_available():\n total = total.cuda()\n torch.distributed.all_reduce(total, group=model.group)\n logging.info(\n f\"training model, #prams = {num_params}, group: {model.group.rank()}, grank:\"\n f\" {torch.distributed.get_rank()}, sizes {model.group.size()}\"\n )\n torch.distributed.barrier()\n if model.group.rank() == 0:\n logging.info(f\"total #prams = {total.item()}\")\n else:\n logging.info(f\"training model, #prams = {num_params}\")\n vocab_size = 10000 # FIXME\n total_loss = 0.0\n start_time = time.time()\n word_counter = 0\n\n optimizer = optimizer(model)\n\n def get_first_device(model):\n if isinstance(model, DDP):\n model = model.module\n\n if not torch.cuda.is_available():\n return torch.device(\"cpu\")\n if model.devices:\n return model.devices[0]\n else:\n return torch.cuda.current_device()\n\n def get_last_device(model):\n if isinstance(model, DDP):\n model = model.module\n\n if not torch.cuda.is_available():\n return torch.device(\"cpu\")\n if model.devices:\n return model.devices[-1]\n else:\n return torch.cuda.current_device()\n\n pipe_group = model.group\n\n if args.ddp_zero:\n model = DDP(\n model,\n device_ids=[torch.cuda.current_device()],\n process_group=get_data_parallel_group(),\n find_unused_parameters=False,\n )\n\n if pipe_group and pipe_group.rank() != 0 and pipe_group.rank() != (pipe_group.size() - 1):\n thing = {\"input\": torch.zeros(args.batch_size)}\n\n class FakeDataset:\n def __getitem__(self, index):\n return thing\n\n def __len__(self):\n return len(lm_dataloader)\n\n lm_dataloader = FakeDataset()\n\n for i, batch in enumerate(lm_dataloader):\n bi = batch[\"input\"]\n if args.max_batch and i > args.max_batch:\n break\n optimizer.zero_grad()\n try:\n if (pipe_group is None or pipe_group.rank() == 0) and not args.ddp_zero:\n tmp = batch[\"input\"].to(get_first_device(model))\n output = model(tmp)\n else:\n output = model(batch[\"input\"])\n except Exception as e:\n raise RuntimeError(f\"training failed on {torch.distributed.get_rank()}\") from e\n\n if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:\n target = batch[\"target\"].to(get_last_device(model))\n output = output.to(target.device)\n\n loss = criterion(output.view(-1, vocab_size), target.view(-1))\n if args.ddp_zero:\n ddp_group = 
get_data_parallel_group()\n torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM, group=ddp_group)\n loss /= ddp_group.size()\n loss.backward()\n del target\n else:\n if args.ddp_zero:\n model.module.back_helper(output)\n else:\n model.back_helper(output)\n\n del output\n\n torch.nn.utils.clip_grad_value_(model.parameters(), 0.05)\n optimizer.step()\n\n if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:\n total_loss += loss.item()\n log_interval = 1\n word_counter += batch[\"ntokens\"]\n if i % log_interval == 0 and i > 0:\n cur_loss = total_loss / log_interval\n elapsed = time.time() - start_time\n print(\n \"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}\".format(\n i, word_counter / elapsed, cur_loss, math.exp(cur_loss)\n )\n )\n word_counter = 0\n total_loss = 0\n start_time = time.time()\n # if i >= 10:\n # break\n # torch.cuda.empty_cache()\n # check_size_buckets()\n\n\ndef evaluate(eval_model, data_source, criterion, bptt, ntokens):\n eval_model.eval()\n total_loss = 0.0\n with torch.no_grad():\n for i in range(0, data_source.size(0) - 1, bptt):\n data, targets = get_batch(data_source, i, bptt)\n output = eval_model(data)\n output = output.to(targets.device)\n output_flat = output.view(-1, ntokens)\n total_loss += len(data) * criterion(output_flat, targets).item()\n return total_loss / (len(data_source) - 1)\n\n\ndef get_number_of_words(data):\n return data.size()[0] * data.size()[1]\n\n\ndef benchmark_language_model(train_data, val_data, test_data, model, criterion, optimizer, ntokens, args):\n epoch = 1\n bptt = 35\n start_time = time.time()\n\n print(\"-\" * 110)\n print(\"| start of epoch {:1d}\".format(epoch))\n print(\"-\" * 110)\n epoch_start_time = time.time()\n train(train_data, model, criterion, optimizer, bptt, ntokens, args)\n val_loss = 1 # evaluate(model, val_data, criterion, bptt, ntokens)\n print(\"-\" * 89)\n print(\n \"| end of epoch {:1d} | time: {:5.2f}s | valid loss {:5.2f} \".format(\n epoch, (time.time() - epoch_start_time), val_loss\n )\n )\n print(\"-\" * 110)\n\n elapsed_time = time.time() - start_time\n nwords = get_number_of_words(train_data) + get_number_of_words(val_data)\n wps = nwords / elapsed_time\n\n test_loss = 1 # evaluate(model, test_data, criterion, bptt, ntokens)\n print(\"=\" * 89)\n print(\n \"| end of training | test loss {:5.2f} \\n| time: {:5.2f}s | words: {:3d} | wps: {:5.2f}\".format(\n test_loss, elapsed_time, nwords, wps\n )\n )\n print(\"=\" * 110)\n\n if can_benchmark and len(model.balance) == 4:\n # Assert that words per second is within 3 standard deviations of the average\n # of six golden runs\n assert wps > 36954.4 - (3 * 116.825)\n\n print(\"Peak allocated bytes on cuda:0: {:1d}\".format(torch.cuda.memory_stats(0)[\"allocated_bytes.all.peak\"]))\n print(\"Peak allocated bytes on cuda:1: {:1d}\".format(torch.cuda.memory_stats(1)[\"allocated_bytes.all.peak\"]))\n print(\"Peak allocated bytes on cuda:2: {:1d}\".format(torch.cuda.memory_stats(2)[\"allocated_bytes.all.peak\"]))\n print(\"Peak allocated bytes on cuda:3: {:1d}\".format(torch.cuda.memory_stats(3)[\"allocated_bytes.all.peak\"]))\n\n # Assert that memory usage on each GPU is within 10% of golden run\n # Right-hand-side is golden run bytes * 110%\n assert torch.cuda.memory_stats(0)[\"allocated_bytes.all.peak\"] < 4061909504 * 1.1\n assert torch.cuda.memory_stats(1)[\"allocated_bytes.all.peak\"] < 4050944 * 1.1\n assert torch.cuda.memory_stats(2)[\"allocated_bytes.all.peak\"] < 10427392 * 1.1\n assert 
torch.cuda.memory_stats(3)[\"allocated_bytes.all.peak\"] < 2031824896 * 1.1\n print(\"No regression detected\")\n\n\ndef generate_balance_weighted(num_devices, num_layers, fraction=0.5):\n balance = []\n layers_assigned = 0\n average_count = num_layers / num_devices\n last_layers = int(average_count * fraction)\n\n balance = generate_balance(num_devices - 1, num_layers - last_layers)\n balance.append(last_layers)\n return balance\n\n\ndef generate_balance(num_devices, num_layers):\n balance = []\n layers_assigned = 0\n for i in range(num_devices):\n x = (num_layers - layers_assigned) / (num_devices - i)\n if x.is_integer():\n balance.append(int(x))\n layers_assigned += x\n else:\n balance.append(math.ceil(x))\n layers_assigned += math.ceil(x)\n return balance\n\n\ndef make_model_and_data(args, device, new_data: bool = True):\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if new_data:\n vocab_size = 10000\n model, criterion, optimizer, scaler = make_model(args, device, vocab_size)\n lm_dataset = BenchmarkLMDataset()\n lm_dataloader = DataLoader(\n lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm\n )\n return {\n \"model\": model,\n \"criterion\": criterion,\n \"optimizer\": optimizer,\n \"data\": lm_dataloader,\n \"vocab_size\": vocab_size,\n }\n else:\n data = get_data(device)\n ntokens, train_data, val_data, test_data = data\n model, criterion, optimizer, scaler = make_model(args, device, ntokens)\n return {\n \"model\": model,\n \"criterion\": criterion,\n \"optimizer\": optimizer,\n \"data\": data,\n }\n\n\ndef bench_single_process(args):\n num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1\n assert num_devices > 0\n init_random_seed(0)\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n new_data = True\n\n blob = make_model_and_data(args, None, new_data=new_data)\n model = blob[\"model\"]\n\n balance = generate_balance(min(num_devices, 4), len(model))\n p = pipe.Pipe(\n model, balance, chunks=args.chunks, pipelined_backward=args.pipelined_backward, checkpoint=args.checkpoint\n )\n del model\n del blob[\"model\"]\n\n if new_data:\n train(blob[\"data\"], p, blob[\"criterion\"], blob[\"optimizer\"], blob[\"vocab_size\"], args)\n else:\n ntokens, train_data, val_data, test_data = blob[\"data\"]\n benchmark_language_model(train_data, val_data, test_data, p, criterion, optimizer, ntokens, args)\n\n\ndef run_mp_worker(args, available_workers):\n new_data = True\n\n blob = make_model_and_data(args, None, new_data=new_data)\n model = blob[\"model\"]\n\n balance = generate_balance_weighted(get_pipeline_parallel_group().size(), len(model), 0.8)\n p = pipe.Pipe(\n model,\n balance,\n style=Pipe.AsyncSchedule,\n chunks=args.chunks,\n worker_map=get_worker_map(),\n input_device=torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\"),\n pipelined_backward=args.pipelined_backward,\n checkpoint=args.checkpoint,\n # loss_fn=blob[\"criterion\"],\n )\n if torch.cuda.is_available():\n p = p.cuda()\n if args.all_at_once and p.pipeline:\n print(f\"running all at once\")\n p.pipeline.all_at_once = True\n\n if new_data:\n train(blob[\"data\"], p, blob[\"criterion\"], blob[\"optimizer\"], blob[\"vocab_size\"], args)\n else:\n ntokens, train_data, val_data, test_data = blob[\"data\"]\n benchmark_language_model(train_data, val_data, test_data, p, criterion, optimizer, ntokens, args)\n\n\ndef run_worker(rank, world_size, args):\n 
if args.world_size != 0:\n world_size = args.world_size\n dist_init(rank + args.rank_base, world_size, hostname=args.host)\n initialize_model_parallel(1, world_size)\n init_random_seed(0)\n run_mp_worker(args, world_size)\n\n rpc.shutdown()\n torch.distributed.destroy_process_group()\n\n\ndef bench_multi_process(args, all_at_once=False):\n if args.local_world_size != 0:\n world_size = args.local_world_size\n else:\n world_size = min(torch.cuda.device_count(), 2)\n mp.spawn(run_worker, args=(world_size, args), nprocs=world_size, join=True)\n\n\nbest_device_map = {\n 0: \"mlx5_0:1\",\n 1: \"mlx5_0:1\",\n 2: \"mlx5_1:1\",\n 3: \"mlx5_1:1\",\n 4: \"mlx5_2:1\",\n 5: \"mlx5_2:1\",\n 6: \"mlx5_3:1\",\n 7: \"mlx5_3:1\",\n}\n\n\ndef bench_mpi(args):\n guess_rank = int(os.environ[\"OMPI_COMM_WORLD_RANK\"])\n world_size = int(os.environ[\"OMPI_COMM_WORLD_SIZE\"])\n local_rank = int(os.environ[\"OMPI_COMM_WORLD_LOCAL_RANK\"])\n os.environ[\"UCX_NET_DEVICES\"] = best_device_map[local_rank]\n\n os.environ[\"MASTER_ADDR\"] = args.host\n os.environ[\"MASTER_PORT\"] = \"10638\"\n if args.socket_name:\n os.environ[\"GLOO_SOCKET_IFNAME\"] = args.socket_name\n os.environ[\"TP_SOCKET_IFNAME\"] = args.socket_name\n\n torch.distributed.init_process_group(backend=\"gloo\", rank=guess_rank, world_size=world_size)\n\n os.environ[\"MASTER_ADDR\"] = args.host\n os.environ[\"MASTER_PORT\"] = \"10639\"\n init_method = f\"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}\"\n rank = torch.distributed.get_rank()\n world_size = torch.distributed.get_world_size()\n torch.cuda.set_device(local_rank % torch.cuda.device_count())\n\n rpc.init_rpc(\n f\"Test{rank}\",\n rank=rank,\n world_size=world_size,\n backend=rpc.BackendType.PROCESS_GROUP,\n rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(rpc_timeout=20, init_method=init_method),\n )\n\n backends = {\"model_parallel_backend\": \"nccl\", \"pipeline_backend\": \"mpi\", \"ddp_backend\": \"nccl\"}\n\n if args.ddp_zero:\n initialize_model_parallel(1, 4, **backends)\n else:\n initialize_model_parallel(1, world_size, **backends)\n init_random_seed(0)\n\n run_mp_worker(args, world_size)\n\n rpc.shutdown()\n torch.distributed.destroy_process_group()\n\n\nparser = argparse.ArgumentParser(description=\"benchmark\")\nparser.add_argument(\"--local-world-size\", \"-l\", type=int, default=0, help=\"local world size\")\nparser.add_argument(\"--world-size\", \"-w\", type=int, default=0, help=\"world size\")\nparser.add_argument(\"--rank-base\", \"-r\", type=int, help=\"rank base\", default=0)\nparser.add_argument(\"--host\", \"-o\", type=str, default=\"localhost\", help=\"hostname\")\nparser.add_argument(\"--no-mpi\", action=\"store_true\", default=False, help=\"disable mpi\")\nparser.add_argument(\"--chunks\", type=int, default=1, help=\"number of microbatches per batch\")\nparser.add_argument(\"--batch-size\", type=int, default=8, help=\"size of a batch\")\nparser.add_argument(\"--all-at-once\", action=\"store_true\", default=False, help=\"do backward pass on whole batch at once\")\nparser.add_argument(\"--max-batch\", type=int, default=4, help=\"Max number of batches\")\nparser.add_argument(\"--socket-name\", type=str, default=None, help=\"socket ifname for gloo/tp\")\nparser.add_argument(\"--num-decoder-layers\", type=int, default=10, help=\"Number of decoder layers in the model\")\nparser.add_argument(\"--ddp-zero\", action=\"store_true\", default=False, help=\"enable ddp\")\nparser.add_argument(\n \"--lazy-construction\", action=\"store_true\", default=False, 
help=\"Number of decoder layers in the model\"\n)\nparser.add_argument(\n \"--checkpoint\", default=\"never\", choices=[\"always\", \"except_last\", \"never\"], help=\"Checkpointing strategy for pipe\"\n)\nparser.add_argument(\n \"--pipelined-backward\", dest=\"pipelined_backward\", action=\"store_true\", help=\"Pipelined backward pass\"\n)\nparser.add_argument(\n \"--no-pipelined-backward\", dest=\"pipelined_backward\", action=\"store_false\", help=\"Pipelined backward pass\"\n)\nparser.set_defaults(pipelined_backward=True)\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n # bench_multi_process(args, all_at_once=True)\n if args.no_mpi or \"OMPI_COMM_WORLD_RANK\" not in os.environ:\n print(f\"Running benchmark with args: {args}\")\n bench_single_process(args)\n else:\n if os.environ[\"OMPI_COMM_WORLD_RANK\"] == \"0\":\n print(f\"Running benchmark with args: {args}\")\n bench_mpi(args)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.cuda.manual_seed", "torch.no_grad", "numpy.random.seed", "torch.cuda.is_available", "torch.distributed.rpc.shutdown", "torch.nn.Dropout", "torch.cos", "torch.distributed.init_process_group", "torch.cuda.device_count", "torch.sin", "torch.arange", "torch.cuda.memory_stats", "torch.device", "torch.Tensor", "torch.ones", "torch.multiprocessing.spawn", "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.manual_seed", "torch.cuda.current_device", "torch.distributed.barrier", "torch.distributed.rpc.ProcessGroupRpcBackendOptions", "torch.distributed.all_reduce", "torch.nn.CrossEntropyLoss", "torch.zeros", "torch.distributed.destroy_process_group" ] ]
konstmish/opt_methods
[ "ae73d9bd89ae5c463e70328d73cbd190175df98c" ]
[ "loss_functions/loss_oracle.py" ]
[ "import copy\nimport numpy as np\nimport warnings\n\nfrom .regularizer import Regularizer\n \n\nclass Oracle():\n \"\"\"\n Base class for all objectives. Can provide objective values,\n gradients and its Hessians as functions that take parameters as input.\n Takes as input the values of l1 and l2 regularization.\n \"\"\"\n def __init__(self, l1=0, l2=0, l2_in_prox=False, regularizer=None, seed=42):\n if l1 < 0.0:\n raise ValueError(\"Invalid value for l1 regularization: {}\".format(l1))\n if l2 < 0.0:\n raise ValueError(\"Invalid value for l2 regularization: {}\".format(l2))\n if l2 == 0. and l2_in_prox:\n warnings.warn(\"The value of l2 is set to 0, so l2_in_prox is changed to False.\")\n l2_in_prox = False\n self.l1 = l1\n self.l2 = 0 if l2_in_prox else l2\n self.l2_in_prox = l2_in_prox\n self.x_opt = None\n self.f_opt = np.inf\n self.regularizer = regularizer\n self.seed = seed\n \n if (l1 > 0 or l2_in_prox) and regularizer is None:\n l2_prox = l2 if l2_in_prox else 0\n self.regularizer = Regularizer(l1=l1, l2=l2_prox)\n self.rng = np.random.default_rng(seed)\n self._smoothness = None\n self._max_smoothness = None\n self._ave_smoothness = None\n self._importance_probs = None\n self._individ_smoothness = None\n \n def value(self, x):\n value = self._value(x)\n if self.regularizer is not None:\n value += self.regularizer(x)\n if value < self.f_opt:\n self.x_opt = copy.deepcopy(x)\n self.f_opt = value\n return value\n \n def gradient(self, x):\n pass\n \n def hessian(self, x):\n pass\n \n def hess_vec_prod(self, x, v, grad_dif=False, eps=None):\n pass\n \n @property\n def smoothness(self):\n pass\n \n @property\n def max_smoothness(self):\n pass\n \n @property\n def average_smoothness(self):\n pass\n\n def batch_smoothness(self, batch_size):\n pass\n \n @staticmethod\n def norm(x):\n pass\n \n @staticmethod\n def inner_prod(x, y):\n pass\n \n @staticmethod\n def outer_prod(x, y):\n pass\n \n @staticmethod\n def is_equal(x, y):\n pass\n" ]
[ [ "numpy.random.default_rng" ] ]
zhongwen/jax
[ "76d2a87915863d3a32732837cc7bf61b7b2f9e5b" ]
[ "tests/lax_numpy_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nfrom functools import partial\nimport itertools\nimport operator\nimport unittest\nfrom unittest import SkipTest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport six\n\nimport numpy as onp\n\nimport jax.ops\nfrom jax import api\nfrom jax import lax\nfrom jax import numpy as lnp\nfrom jax import test_util as jtu\nfrom jax.lib import xla_bridge\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n\nnonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]\nnonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes\nempty_array_shapes = [(0,), (0, 4), (3, 0),]\n\nscalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]\narray_shapes = nonempty_array_shapes + empty_array_shapes\nnonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes\nnonempty_shapes = scalar_shapes + nonempty_array_shapes\nall_shapes = scalar_shapes + array_shapes\n\nfloat_dtypes = [onp.float32, onp.float64]\ncomplex_dtypes = [onp.complex64, onp.complex128]\nint_dtypes = [onp.int32, onp.int64]\nunsigned_dtypes = [onp.uint32, onp.uint64]\nbool_dtypes = [onp.bool_]\ndefault_dtypes = float_dtypes + int_dtypes\ninexact_dtypes = float_dtypes + complex_dtypes\nnumber_dtypes = float_dtypes + complex_dtypes + int_dtypes\nall_dtypes = number_dtypes + bool_dtypes\n\nOpRecord = collections.namedtuple(\n \"OpRecord\",\n [\"name\", \"nargs\", \"dtypes\", \"shapes\", \"rng\", \"diff_modes\", \"test_name\",\n \"check_dtypes\"])\n\n\ndef op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None,\n check_dtypes=True):\n test_name = test_name or name\n return OpRecord(name, nargs, dtypes, shapes, rng, diff_modes, test_name,\n check_dtypes)\n\nJAX_ONE_TO_ONE_OP_RECORDS = [\n op_record(\"abs\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"add\", 2, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"ceil\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"conj\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"equal\", 2, all_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"exp\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"fabs\", 1, float_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"float_power\", 2, inexact_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"floor\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"greater\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"greater_equal\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"less\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"less_equal\", 
2, number_dtypes, all_shapes, jtu.rand_some_equal(), []),\n op_record(\"log\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"logical_and\", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),\n op_record(\"logical_not\", 1, all_dtypes, all_shapes, jtu.rand_bool(), []),\n op_record(\"logical_or\", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),\n op_record(\"logical_xor\", 2, all_dtypes, all_shapes, jtu.rand_bool(), []),\n op_record(\"maximum\", 2, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"minimum\", 2, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"multiply\", 2, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"negative\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"not_equal\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), [\"rev\"]),\n op_record(\"array_equal\", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), [\"rev\"]),\n op_record(\"reciprocal\", 1, inexact_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"subtract\", 2, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"sin\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"cos\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"tan\", 1, number_dtypes, all_shapes, jtu.rand_uniform(-1.5, 1.5),\n [\"rev\"]),\n op_record(\"sinh\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"cosh\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"tanh\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"arcsin\", 1, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n op_record(\"arccos\", 1, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n op_record(\"arctan\", 1, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n op_record(\"arctan2\", 2, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n op_record(\"arcsinh\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"arccosh\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"arctanh\", 1, number_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n]\n\nJAX_COMPOUND_OP_RECORDS = [\n # angle has inconsistent 32/64-bit return types across numpy versions.\n op_record(\"angle\", 1, number_dtypes, all_shapes, jtu.rand_default(), [],\n check_dtypes=False),\n op_record(\"atleast_1d\", 1, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"atleast_2d\", 1, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"atleast_3d\", 1, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"cbrt\", 1, default_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"conjugate\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"deg2rad\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"divide\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), [\"rev\"]),\n op_record(\"exp2\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"expm1\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [],\n test_name=\"expm1_large\"),\n op_record(\"expm1\", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),\n op_record(\"fix\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"floor_divide\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), [\"rev\"]),\n op_record(\"heaviside\", 2, default_dtypes, all_shapes, 
jtu.rand_default(), []),\n op_record(\"hypot\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"kron\", 2, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n op_record(\"outer\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"imag\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"iscomplex\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"isfinite\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isinf\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isnan\", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isneginf\", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isposinf\", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []),\n op_record(\"isreal\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"isrealobj\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"log2\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"log10\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"log1p\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [],\n test_name=\"log1p_large\"),\n op_record(\"log1p\", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),\n op_record(\"logaddexp\", 2, float_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"logaddexp2\", 2, float_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"polyval\", 2, number_dtypes, nonempty_nonscalar_array_shapes, jtu.rand_default(), []),\n op_record(\"positive\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"power\", 2, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"rad2deg\", 1, float_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"ravel\", 1, all_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"real\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n op_record(\"remainder\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"mod\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"sinc\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"square\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"sqrt\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n op_record(\"transpose\", 1, all_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\n op_record(\"true_divide\", 2, all_dtypes, all_shapes, jtu.rand_nonzero(), [\"rev\"]),\n op_record(\"where\", 3, (onp.float32, onp.int64), all_shapes, jtu.rand_some_zero(), []),\n op_record(\"diff\", 1, number_dtypes, nonzerodim_shapes, jtu.rand_default(), [\"rev\"]),\n]\n\nJAX_BITWISE_OP_RECORDS = [\n op_record(\"bitwise_and\", 2, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool(), []),\n op_record(\"bitwise_not\", 1, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool(), []),\n op_record(\"bitwise_or\", 2, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool(), []),\n op_record(\"bitwise_xor\", 2, int_dtypes + unsigned_dtypes, all_shapes,\n jtu.rand_bool(), []),\n]\n\nJAX_REDUCER_RECORDS = [\n op_record(\"mean\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n op_record(\"prod\", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),\n op_record(\"sum\", 1, number_dtypes, all_shapes, jtu.rand_default(), 
[]),\n op_record(\"var\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n op_record(\"std\", 1, inexact_dtypes, nonempty_shapes, jtu.rand_default(), []),\n]\n\nJAX_REDUCER_NO_DTYPE_RECORDS = [\n op_record(\"all\", 1, all_dtypes, all_shapes, jtu.rand_some_zero(), []),\n op_record(\"any\", 1, all_dtypes, all_shapes, jtu.rand_some_zero(), []),\n op_record(\"max\", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),\n op_record(\"min\", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),\n]\n\nJAX_ARGMINMAX_RECORDS = [\n op_record(\"argmin\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),\n op_record(\"argmax\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),\n]\n\nJAX_OPERATOR_OVERLOADS = [\n op_record(\"__add__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__sub__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__mul__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__eq__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__ne__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__lt__\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__gt__\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__ge__\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__neg__\", 1, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__pow__\", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),\n op_record(\"__mod__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__floordiv__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__truediv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__abs__\", 1, number_dtypes, all_shapes, jtu.rand_default(), []),\n # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2\n op_record(\"__invert__\", 1, int_dtypes, all_shapes, jtu.rand_default(), []),\n # TODO(mattjj): investigate these failures\n # op_record(\"__or__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n # op_record(\"__and__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n # op_record(\"__xor__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n # op_record(\"__divmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n # TODO(mattjj): lshift, rshift\n]\n\nJAX_RIGHT_OPERATOR_OVERLOADS = [\n op_record(\"__radd__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__rsub__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__rmul__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"__rpow__\", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),\n op_record(\"__rmod__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__rfloordiv__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n op_record(\"__rtruediv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n # op_record(\"__ror__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n # op_record(\"__rand__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n # op_record(\"__rxor__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n # op_record(\"__rdivmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n]\n\nnumpy_version = tuple(map(int, onp.version.version.split('.')))\nif numpy_version >= (1, 15):\n JAX_COMPOUND_OP_RECORDS += [\n op_record(\"isclose\", 2, all_dtypes, 
all_shapes, jtu.rand_small_positive(), []),\n op_record(\"gcd\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\n op_record(\"lcm\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\n ]\n JAX_REDUCER_NO_DTYPE_RECORDS += [\n op_record(\"ptp\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n ]\n\nif six.PY2:\n JAX_OPERATOR_OVERLOADS += [\n op_record(\"__div__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n ]\n JAX_RIGHT_OPERATOR_OVERLOADS += [\n op_record(\"__rdiv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n ]\n\n\nCombosWithReplacement = itertools.combinations_with_replacement\n\n\ndef _dtypes_are_compatible_for_bitwise_ops(args):\n if len(args) <= 1:\n return True\n is_signed = lambda dtype: onp.issubdtype(dtype, onp.signedinteger)\n width = lambda dtype: onp.iinfo(dtype).bits\n x, y = args\n if width(x) > width(y):\n x, y = y, x\n # The following condition seems a little ad hoc, but seems to capture what\n # numpy actually implements.\n return (\n is_signed(x) == is_signed(y)\n or (width(x) == 32 and width(y) == 32)\n or (width(x) == 32 and width(y) == 64 and is_signed(y)))\n\ndef _shapes_are_broadcast_compatible(shapes):\n accumulator = onp.zeros([])\n for shape in shapes:\n try:\n accumulator = accumulator + onp.zeros(shape)\n except ValueError:\n return False\n return True\n\n\nclass LaxBackedNumpyTests(jtu.JaxTestCase):\n \"\"\"Tests for LAX-backed Numpy implementation.\"\"\"\n\n def _GetArgsMaker(self, rng, shapes, dtypes):\n return lambda: [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n dtypes),\n \"rng\": rec.rng, \"shapes\": shapes, \"dtypes\": dtypes,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name),\n \"check_dtypes\": rec.check_dtypes}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n CombosWithReplacement(rec.shapes, rec.nargs))\n for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))\n for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,\n JAX_COMPOUND_OP_RECORDS)))\n def testOp(self, onp_op, lnp_op, rng, shapes, dtypes, check_dtypes):\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n py_scalar_arg = jtu.PYTHON_SCALAR_SHAPE in shapes\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,\n check_dtypes=check_dtypes and not py_scalar_arg)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=check_dtypes)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n dtypes),\n \"rng\": rec.rng, \"shapes\": shapes, \"dtypes\": dtypes, \"name\": rec.name}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n CombosWithReplacement(rec.shapes, rec.nargs))\n for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))\n for rec in JAX_OPERATOR_OVERLOADS))\n def testOperatorOverload(self, name, rng, shapes, dtypes):\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)\n self._CompileAndCheck(fun, args_maker,\n check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n dtypes),\n \"rng\": rec.rng, \"shapes\": shapes, \"dtypes\": dtypes, \"name\": rec.name}\n for shapes in filter(\n 
_shapes_are_broadcast_compatible,\n CombosWithReplacement(rec.shapes, rec.nargs))\n for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))\n for rec in JAX_RIGHT_OPERATOR_OVERLOADS))\n def testRightOperatorOverload(self, name, rng, shapes, dtypes):\n if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:\n raise SkipTest() # TODO(mattjj): clean up\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n fun = lambda fst, snd: getattr(snd, name)(fst)\n self._CompileAndCheck(fun, args_maker,\n check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n rec.test_name, shapes, dtypes),\n \"rng\": rec.rng, \"shapes\": shapes, \"dtypes\": dtypes,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name)}\n for shapes in filter(\n _shapes_are_broadcast_compatible,\n CombosWithReplacement(rec.shapes, rec.nargs))\n for dtypes in filter(\n _dtypes_are_compatible_for_bitwise_ops,\n CombosWithReplacement(rec.dtypes, rec.nargs)))\n for rec in JAX_BITWISE_OP_RECORDS))\n def testBitwiseOp(self, onp_op, lnp_op, rng, shapes, dtypes):\n if not FLAGS.jax_enable_x64 and any(\n onp.iinfo(dtype).bits == 64 for dtype in dtypes):\n self.skipTest(\"x64 types are disabled by jax_enable_x64\")\n args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,\n check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}_dtype={}_keepdims={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis,\n \"None\" if out_dtype is None else onp.dtype(out_dtype).name, keepdims),\n \"rng\": rec.rng, \"shape\": shape, \"dtype\": dtype, \"out_dtype\": out_dtype,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name),\n \"axis\": axis, \"keepdims\": keepdims}\n for rec in JAX_REDUCER_RECORDS\n for shape in rec.shapes for dtype in rec.dtypes\n for out_dtype in [None] + rec.dtypes\n for axis in set(range(-len(shape), len(shape))) | set([None])\n for keepdims in [False, True]))\n def testReducer(self, onp_op, lnp_op, rng, shape, dtype, out_dtype, axis, keepdims):\n onp_fun = lambda x: onp_op(x, axis, dtype=out_dtype, keepdims=keepdims)\n lnp_fun = lambda x: lnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}_keepdims={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),\n \"rng\": rec.rng, \"shape\": shape, \"dtype\": dtype,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name),\n \"axis\": axis, \"keepdims\": keepdims}\n for rec in JAX_REDUCER_NO_DTYPE_RECORDS\n for shape in rec.shapes for dtype in rec.dtypes\n for axis in set(range(-len(shape), len(shape))) | set([None])\n for keepdims in [False, True]))\n def testReducerNoDtype(self, onp_op, lnp_op, rng, shape, dtype, axis, keepdims):\n onp_fun = lambda x: onp_op(x, axis, keepdims=keepdims)\n lnp_fun = lambda x: lnp_op(x, axis, keepdims=keepdims)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, 
args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in all_shapes for dtype in all_dtypes\n for axis in set(range(-len(shape), len(shape))) | set([None])))\n def testCountNonzero(self, shape, dtype, axis):\n rng = jtu.rand_some_zero()\n onp_fun = lambda x: onp.count_nonzero(x, axis)\n lnp_fun = lambda x: lnp.count_nonzero(x, axis)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"{}_inshape={}_axis={}\".format(\n rec.test_name.capitalize(),\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng\": rec.rng, \"shape\": shape, \"dtype\": dtype,\n \"onp_op\": getattr(onp, rec.name), \"lnp_op\": getattr(lnp, rec.name),\n \"axis\": axis}\n for rec in JAX_ARGMINMAX_RECORDS\n for shape in rec.shapes for dtype in rec.dtypes\n for axis in range(-len(shape), len(shape))))\n def testArgMinMax(self, onp_op, lnp_op, rng, shape, dtype, axis):\n if (dtype == onp.complex128 and FLAGS.jax_test_dut and\n FLAGS.jax_test_dut.startswith(\"gpu\")):\n raise unittest.SkipTest(\"complex128 reductions not supported on GPU\")\n\n def onp_fun(array_to_reduce):\n return onp_op(array_to_reduce, axis)\n\n def lnp_fun(array_to_reduce):\n return lnp_op(array_to_reduce, axis)\n\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),\n axes),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"axes\": axes, \"rng\": rng}\n for rng in [jtu.rand_default()]\n for lhs_shape, rhs_shape, axes in [\n [(2,), (2,), (-1, -1, -1, None)], # scalar output\n [(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors\n [(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors\n [(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting\n [(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes\n [(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting\n [(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors\n [(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting\n [(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing\n [(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before\n ]\n for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))\n def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n axisa, axisb, axisc, axis = axes\n lnp_fun = lambda a, b: lnp.cross(a, b, axisa, axisb, axisc, axis)\n onp_fun = lambda a, b: onp.cross(a, b, axisa, axisb, axisc, axis)\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n name,\n 
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"rng\": rng}\n for rng in [jtu.rand_default()]\n for name, lhs_shape, rhs_shape in [\n (\"matrix-scalar\", (3, 3), ()),\n (\"scalar-matrix\", (), (3, 3)),\n (\"matrix-vector\", (4, 5), (5,)),\n (\"vector-matrix\", (6,), (6, 4)),\n (\"matrix-matrix\", (3, 4), (4, 5)),\n (\"tensor-vector\", (4, 3, 2), (2,)),\n (\"vector-tensor\", (2,), (3, 2, 4)),\n (\"tensor-matrix\", (4, 3, 2), (2, 5)),\n (\"matrix-tensor\", (5, 2), (3, 2, 4)),\n (\"tensor-tensor\", (2, 3, 4), (5, 4, 1))]\n for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))\n def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n self._CheckAgainstNumpy(onp.dot, lnp.dot, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp.dot, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n name,\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"rng\": rng}\n for rng in [jtu.rand_default()]\n for name, lhs_shape, rhs_shape in [\n (\"vector-vector\", (3,), (3,)),\n (\"matrix-vector\", (3, 3), (3,)),\n (\"vector-matrix\", (3,), (3, 3)),\n (\"matrix-matrix\", (3, 3), (3, 3)),\n (\"vector-tensor\", (3,), (5, 3, 2)),\n (\"tensor-vector\", (5, 3, 2), (2,)),\n (\"matrix-tensor\", (5, 2), (3, 2, 4)),\n (\"tensor-matrix\", (5, 2, 3), (3, 2)),\n (\"tensor-tensor\", (5, 3, 4), (5, 4, 1)),\n (\"tensor-tensor-broadcast\", (3, 1, 3, 4), (5, 4, 1))]\n for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))\n def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n self._CheckAgainstNumpy(onp.matmul, lnp.matmul, args_maker,\n check_dtypes=True)\n self._CompileAndCheck(lnp.matmul, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),\n axes),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"axes\": axes, \"rng\": rng}\n for rng in [jtu.rand_default()]\n for lhs_shape, rhs_shape, axes in [\n [(2, 3, 4), (5, 6, 7), 0], # from issue #740\n [(2, 3, 4), (3, 4, 5, 6), 2],\n [(2, 3, 4), (5, 4, 3, 6), [1, 2]],\n [(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],\n [(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],\n ]\n for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))\n def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n lnp_fun = lambda a, b: lnp.tensordot(a, b, axes)\n onp_fun = lambda a, b: onp.tensordot(a, b, axes)\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_{}\".format(\n jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),\n 
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),\n \"lhs_shape\": lhs_shape, \"lhs_dtype\": lhs_dtype,\n \"rhs_shape\": rhs_shape, \"rhs_dtype\": rhs_dtype,\n \"rng\": jtu.rand_default()}\n # TODO(phawkins): support integer dtypes too.\n for lhs_dtype, rhs_dtype in CombosWithReplacement(inexact_dtypes, 2)\n for lhs_shape, rhs_shape in [\n (l, r) for l, r in CombosWithReplacement(all_shapes, 2)\n if len(jtu._dims_of_shape(l)) == 0\n or len(jtu._dims_of_shape(r)) == 0\n or l[-1] == r[-1]]))\n def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng):\n args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]\n onp_fun = lambda lhs, rhs: onp.inner(lhs, rhs)\n lnp_fun = lambda lhs, rhs: lnp.inner(lhs, rhs)\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_amin={}_amax={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),\n \"shape\": shape, \"dtype\": dtype, \"a_min\": a_min, \"a_max\": a_max,\n \"rng\": jtu.rand_default()}\n for shape in all_shapes for dtype in number_dtypes\n for a_min, a_max in [(-1, None), (None, 1), (-1, 1)]))\n def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng):\n onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)\n lnp_fun = lambda x: lnp.clip(x, a_min=a_min, a_max=a_max)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_decimals={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), decimals),\n \"shape\": shape, \"dtype\": dtype, \"decimals\": decimals,\n \"rng\": jtu.rand_default()}\n for shape in all_shapes for dtype in number_dtypes\n for decimals in [0, 1, -2]))\n def testRoundStaticDecimals(self, shape, dtype, decimals, rng):\n if onp.issubdtype(dtype, onp.integer) and decimals < 0:\n self.skipTest(\"Integer rounding with decimals < 0 not implemented\")\n onp_fun = lambda x: onp.round(x, decimals=decimals)\n lnp_fun = lambda x: lnp.round(x, decimals=decimals)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_mode={}_rpadwidth={}_rconstantvalues={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), mode, pad_width_rank,\n constant_values_rank),\n \"shape\": shape, \"dtype\": dtype, \"mode\": mode,\n \"pad_width_rank\": pad_width_rank,\n \"constant_values_rank\": constant_values_rank, \"rng\": jtu.rand_default(),\n \"irng\": jtu.rand_int(3)}\n for mode, constant_values_rank, shapes in [\n ('constant', 0, all_shapes),\n ('constant', 1, all_shapes),\n ('constant', 2, all_shapes),\n ('symmetric', None, nonempty_shapes),\n ('reflect', None, nonempty_shapes),\n ('wrap', None, nonempty_shapes),\n ]\n for shape in shapes for dtype in all_dtypes\n for pad_width_rank in range(3)))\n def testPad(self, shape, dtype, mode, pad_width_rank, constant_values_rank,\n rng, irng):\n pad_width = irng([len(shape), 2][2 - pad_width_rank:], onp.int32)\n def onp_fun(x, kwargs):\n if pad_width.size == 0:\n return x\n return onp.pad(x, pad_width, mode=mode, **kwargs)\n def lnp_fun(x, kwargs):\n return 
lnp.pad(x, pad_width, mode=mode, **kwargs)\n\n def args_maker():\n kwargs = {}\n if constant_values_rank:\n kwargs[\"constant_values\"] = rng(\n [len(shape), 2][2 - constant_values_rank:], dtype)\n return rng(shape, dtype), kwargs\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape=[{}]_reps={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), reps),\n \"shape\": shape, \"dtype\": dtype, \"reps\": reps,\n \"rng\": jtu.rand_default()}\n for reps in [(), (2,), (3, 4), (2, 3, 4)]\n for dtype in default_dtypes\n for shape in all_shapes\n ))\n def testTile(self, shape, dtype, reps, rng):\n onp_fun = lambda arg: onp.tile(arg, reps)\n lnp_fun = lambda arg: lnp.tile(arg, reps)\n\n args_maker = lambda: [rng(shape, dtype)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_axis={}_baseshape=[{}]_dtypes=[{}]\".format(\n axis, \",\".join(str(d) for d in base_shape),\n \",\".join(onp.dtype(dtype).name for dtype in dtypes)),\n \"axis\": axis, \"base_shape\": base_shape, \"dtypes\": dtypes,\n \"rng\": jtu.rand_default()}\n for num_arrs in [3]\n for dtypes in CombosWithReplacement(default_dtypes, num_arrs)\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for axis in range(-len(base_shape)+1, len(base_shape))))\n def testConcatenate(self, axis, base_shape, dtypes, rng):\n wrapped_axis = axis % len(base_shape)\n shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), dtypes)]\n onp_fun = lambda *args: onp.concatenate(args, axis=axis)\n lnp_fun = lambda *args: lnp.concatenate(args, axis=axis)\n\n def args_maker():\n return [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_axis={}_baseshape=[{}]_dtypes=[{}]\".format(\n axis, \",\".join(str(d) for d in base_shape),\n \",\".join(onp.dtype(dtype).name for dtype in dtypes)),\n \"axis\": axis, \"base_shape\": base_shape, \"dtypes\": dtypes,\n \"rng\": jtu.rand_default()}\n for dtypes in CombosWithReplacement(default_dtypes, 2)\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for axis in range(-len(base_shape)+1, len(base_shape))))\n def testAppend(self, axis, base_shape, dtypes, rng):\n wrapped_axis = axis % len(base_shape)\n shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), dtypes)]\n onp_fun = lambda arr, values: onp.append(arr, values, axis=axis)\n lnp_fun = lambda arr, values: lnp.append(arr, values, axis=axis)\n\n def args_maker():\n return [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape=[{}]_axis={}_repeats={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, repeats),\n \"axis\": axis, \"shape\": shape, \"dtype\": dtype, \"repeats\": repeats,\n \"rng\": jtu.rand_default()}\n for repeats in 
[0, 1, 2]\n for dtype in default_dtypes\n for shape in all_shapes\n for axis in [None] + list(range(-len(shape), len(shape)))))\n def testRepeat(self, axis, shape, dtype, repeats, rng):\n onp_fun = lambda arg: onp.repeat(arg, repeats=repeats, axis=axis)\n lnp_fun = lambda arg: lnp.repeat(arg, repeats=repeats, axis=axis)\n\n args_maker = lambda: [rng(shape, dtype)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"op={}_shape=[{}]_axis={}_out_dtype={}\".format(\n op, jtu.format_shape_dtype_string(shape, dtype), axis, out_dtype),\n \"axis\": axis, \"shape\": shape, \"dtype\": dtype, \"out_dtype\": out_dtype,\n \"rng\": jtu.rand_default(), \"lnp_op\": getattr(lnp, op),\n \"onp_op\": getattr(onp, op)}\n for op in [\"cumsum\", \"cumprod\"]\n # TODO(phawkins): replace both type lists with default_dtypes after a\n # Jaxlib update includes\n # https://github.com/google/jax/commit/86f5d189cf563b027c3cd00eea38072c003905c8\n for dtype in [onp.float32, onp.int32]\n for out_dtype in [onp.float32, onp.int32]\n for shape in all_shapes\n for axis in [None] + list(range(-len(shape), len(shape)))))\n def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, lnp_op, rng):\n onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype)\n lnp_fun = lambda arg: lnp_op(arg, axis=axis, dtype=out_dtype)\n\n args_maker = lambda: [rng(shape, dtype)]\n\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dtype={}_m={}_n={}_k={}\".format(\n onp.dtype(dtype).name, m, n, k),\n \"m\": m, \"n\": n, \"k\": k, \"dtype\": dtype, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for n in [0, 4]\n for m in [None, 0, 1, 3, 4]\n for k in list(range(-4, 4))))\n def testTri(self, m, n, k, dtype, rng):\n onp_fun = lambda: onp.tri(n, M=m, k=k, dtype=dtype)\n lnp_fun = lambda: lnp.tri(n, M=m, k=k, dtype=dtype)\n args_maker = lambda: []\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_shape={}_k={}\".format(\n op, jtu.format_shape_dtype_string(shape, dtype), k),\n \"dtype\": dtype, \"shape\": shape, \"op\": op, \"k\": k,\n \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for shape in [shape for shape in all_shapes if len(shape) >= 2]\n for op in [\"tril\", \"triu\"]\n for k in list(range(-3, 3))))\n def testTriLU(self, dtype, shape, op, k, rng):\n onp_fun = lambda arg: getattr(onp, op)(arg, k=k)\n lnp_fun = lambda arg: getattr(lnp, op)(arg, k=k)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_k={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k),\n \"dtype\": dtype, \"shape\": shape, \"k\": k, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]\n for k in list(range(-4, 4))))\n def testDiag(self, shape, dtype, k, rng):\n onp_fun = lambda arg: onp.diag(arg, k)\n lnp_fun = lambda arg: 
lnp.diag(arg, k)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_offset={}_axis1={}_axis2={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),\n \"dtype\": dtype, \"shape\": shape, \"offset\": offset, \"axis1\": axis1,\n \"axis2\": axis2, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for shape in [shape for shape in all_shapes if len(shape) >= 2]\n for axis1 in range(-len(shape), len(shape))\n for axis2 in [a for a in range(-len(shape), len(shape))\n if a % len(shape) != axis1 % len(shape)]\n for offset in list(range(-4, 4))))\n def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng):\n onp_fun = lambda arg: onp.diagonal(arg, offset, axis1, axis2)\n lnp_fun = lambda arg: lnp.diagonal(arg, offset, axis1, axis2)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_n={}\".format(onp.dtype(dtype).name, n),\n \"dtype\": dtype, \"n\": n}\n for dtype in default_dtypes\n for n in list(range(4))))\n def testIdentity(self, n, dtype):\n onp_fun = lambda: onp.identity(n, dtype)\n lnp_fun = lambda: lnp.identity(n, dtype)\n args_maker = lambda: []\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_dtype_{}_offset={}_axis1={}_axis2={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n out_dtype, offset, axis1, axis2),\n \"dtype\": dtype, \"out_dtype\": out_dtype, \"shape\": shape, \"offset\": offset,\n \"axis1\": axis1, \"axis2\": axis2, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for out_dtype in [None] + number_dtypes\n for shape in [shape for shape in all_shapes if len(shape) >= 2]\n for axis1 in range(-len(shape), len(shape))\n for axis2 in range(-len(shape), len(shape))\n if (axis1 % len(shape)) != (axis2 % len(shape))\n for offset in list(range(-4, 4))))\n def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng):\n onp_fun = lambda arg: onp.trace(arg, offset, axis1, axis2, out_dtype)\n lnp_fun = lambda arg: lnp.trace(arg, offset, axis1, axis2, out_dtype)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_test_name_suffix(\"\", [shape] * len(dtypes), dtypes), axis),\n \"shape\": shape, \"axis\": axis, \"dtypes\": dtypes, \"rng\": rng}\n for dtypes in [\n [onp.float32],\n [onp.float32, onp.float32],\n [onp.float32, onp.int32, onp.float32],\n [onp.float32, onp.int64, onp.float32],\n [onp.float32, onp.int32, onp.float64],\n ]\n for shape in [(), (2,), (3, 4), (1, 100)]\n for axis in range(-len(shape), len(shape) + 1)\n for rng in [jtu.rand_default()]))\n def testStack(self, shape, axis, dtypes, rng):\n args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]\n onp_fun = partial(onp.stack, axis=axis)\n lnp_fun = partial(lnp.stack, axis=axis)\n 
self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_{}\".format(\n op, jtu.format_test_name_suffix(\"\", [shape] * len(dtypes), dtypes)),\n \"shape\": shape, \"op\": op, \"dtypes\": dtypes, \"rng\": rng}\n for op in [\"hstack\", \"vstack\", \"dstack\"]\n for dtypes in [\n [onp.float32],\n [onp.float32, onp.float32],\n [onp.float32, onp.int32, onp.float32],\n [onp.float32, onp.int64, onp.float32],\n [onp.float32, onp.int32, onp.float64],\n ]\n for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]\n for rng in [jtu.rand_default()]))\n def testHVDStack(self, shape, op, dtypes, rng):\n args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]\n onp_fun = getattr(onp, op)\n lnp_fun = getattr(lnp, op)\n self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outdtype={}\".format(\n jtu.format_shape_dtype_string(shape, fill_value_dtype),\n onp.dtype(out_dtype).name if out_dtype else \"None\"),\n \"shape\": shape, \"fill_value_dtype\": fill_value_dtype,\n \"out_dtype\": out_dtype, \"rng\": jtu.rand_default()}\n for shape in array_shapes\n for fill_value_dtype in default_dtypes\n for out_dtype in [None] + default_dtypes))\n def testFull(self, shape, fill_value_dtype, out_dtype, rng):\n onp_fun = lambda fill_value: onp.full(shape, fill_value, dtype=out_dtype)\n lnp_fun = lambda fill_value: lnp.full(shape, fill_value, dtype=out_dtype)\n args_maker = lambda: [rng((), fill_value_dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_filldtype={}_outdtype={}\".format(\n jtu.format_shape_dtype_string(shape, in_dtype),\n onp.dtype(fill_value_dtype).name,\n onp.dtype(out_dtype).name),\n \"shape\": shape, \"in_dtype\": in_dtype,\n \"fill_value_dtype\": fill_value_dtype, \"out_dtype\": out_dtype,\n \"rng\": jtu.rand_default()}\n for shape in array_shapes\n for in_dtype in default_dtypes\n for fill_value_dtype in default_dtypes\n for out_dtype in default_dtypes))\n def testFullLike(self, shape, in_dtype, fill_value_dtype, out_dtype, rng):\n onp_fun = lambda x, fill_value: onp.full_like(x, fill_value, dtype=out_dtype)\n lnp_fun = lambda x, fill_value: lnp.full_like(x, fill_value, dtype=out_dtype)\n args_maker = lambda: [rng(shape, in_dtype), rng((), fill_value_dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_{}sections\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),\n \"shape\": shape, \"num_sections\": num_sections, \"axis\": axis,\n \"dtype\": dtype, \"rng\": jtu.rand_default()}\n for shape, axis, num_sections in [\n ((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),\n ((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]\n for dtype in default_dtypes))\n def testSplitStaticInt(self, shape, num_sections, axis, dtype, rng):\n onp_fun = lambda x: onp.split(x, num_sections, axis=axis)\n lnp_fun = lambda x: lnp.split(x, num_sections, axis=axis)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, 
args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}_{}sections\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),\n \"shape\": shape, \"num_sections\": num_sections, \"axis\": axis,\n \"dtype\": dtype, \"rng\": jtu.rand_default()}\n for shape, axis, num_sections in [\n ((12, 4), 0, 4), ((12, 4), 1, 2),\n ((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]\n for dtype in default_dtypes))\n def testHVDSplit(self, shape, num_sections, axis, dtype, rng):\n def fn(module, axis):\n if axis == 0:\n return module.vsplit\n elif axis == 1:\n return module.hsplit\n else:\n assert axis == 2\n return module.dsplit\n\n onp_fun = lambda x: fn(onp, axis)(x, num_sections)\n lnp_fun = lambda x: fn(lnp, axis)(x, num_sections)\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}_order={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype),\n order),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype,\n \"order\": order, \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for order in [\"C\", \"F\"]\n for arg_shape, out_shape in [\n (jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),\n ((), (1, 1, 1)),\n ((7, 0), (0, 42, 101)),\n ((3, 4), 12),\n ((3, 4), (12,)),\n ((3, 4), -1),\n ((2, 1, 4), (-1,)),\n ((2, 2, 4), (2, 8))\n ]))\n def testReshape(self, arg_shape, out_shape, dtype, order, rng):\n onp_fun = lambda x: onp.reshape(x, out_shape, order=order)\n lnp_fun = lambda x: lnp.reshape(x, out_shape, order=order)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype)),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype,\n \"rng\": jtu.rand_default()}\n for dtype in default_dtypes\n for arg_shape, out_shape in [\n ((7, 0), (0, 42, 101)),\n ((2, 1, 4), (-1,)),\n ((2, 2, 4), (2, 8))\n ]))\n def testReshapeMethod(self, arg_shape, out_shape, dtype, rng):\n onp_fun = lambda x: onp.reshape(x, out_shape)\n lnp_fun = lambda x: x.reshape(*out_shape)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_expanddim={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype), dim),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"dim\": dim,\n \"rng\": jtu.rand_default()}\n for arg_shape in [(), (3,), (3, 4)]\n for dtype in default_dtypes\n for dim in range(-len(arg_shape)+1, len(arg_shape))))\n def testExpandDimsStaticDim(self, arg_shape, dtype, dim, rng):\n onp_fun = lambda x: onp.expand_dims(x, dim)\n lnp_fun = lambda x: lnp.expand_dims(x, dim)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n 
@parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_axes=({},{})\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"ax1\": ax1, \"ax2\": ax2,\n \"rng\": jtu.rand_default()}\n for arg_shape, ax1, ax2 in [\n ((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),\n ((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]\n for dtype in default_dtypes))\n def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2, rng):\n onp_fun = lambda x: onp.swapaxes(x, ax1, ax2)\n lnp_fun = lambda x: lnp.swapaxes(x, ax1, ax2)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_axis={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype), ax),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"ax\": ax,\n \"rng\": jtu.rand_default()}\n for arg_shape, ax in [\n ((3, 1), None),\n ((3, 1), 1),\n ((1, 3, 1), (0, 2)),\n ((1, 4, 1), (0,))]\n for dtype in default_dtypes))\n def testSqueeze(self, arg_shape, dtype, ax, rng):\n onp_fun = lambda x: onp.squeeze(x, ax)\n lnp_fun = lambda x: lnp.squeeze(x, ax)\n args_maker = lambda: [rng(arg_shape, dtype)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}_weights={}_returned={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n axis,\n (None if weights_shape == None else jtu.format_shape_dtype_string(weights_shape, dtype)),\n returned),\n \"rng\": jtu.rand_default(), \"shape\": shape, \"dtype\": dtype, \"axis\": axis,\n \"weights_shape\": weights_shape, \"returned\": returned}\n for shape in nonempty_shapes\n for dtype in number_dtypes\n for axis in set(range(-len(shape), len(shape))) | set([None])\n # `weights_shape` is either `None`, same as the averaged axis, or same as\n # that of the input\n for weights_shape in ([None, shape] if axis is None else [None, (shape[axis],), shape])\n for returned in [False, True]))\n def testAverage(self, shape, dtype, axis, weights_shape, returned, rng):\n onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)\n lnp_fun = lambda x, weights: lnp.average(x, axis, weights, returned)\n args_maker = lambda: [rng(shape, dtype),\n None if weights_shape is None else rng(weights_shape, dtype)]\n\n try:\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n except ZeroDivisionError:\n self.skipTest(\"don't support checking for ZeroDivisionError\")\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_arg{}\".format(i), \"arg\": arg}\n for i, arg in enumerate([\n 3., [1, 2, 3], [1., 2., 3.],\n [[1, 2], [3, 4], [5, 6]], [[1, 2.], [3, 4], [5, 6]],\n [[3, onp.array(2), 1], onp.arange(3.)],\n ])))\n def testArray(self, arg):\n args_maker = lambda: [arg]\n self._CheckAgainstNumpy(onp.array, lnp.array, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp.array, args_maker, check_dtypes=True)\n\n def testIssue121(self):\n assert not onp.isscalar(lnp.array(3))\n\n def testArrayMethod(self):\n class arraylike(object):\n dtype = onp.float32\n def __array__(self, dtype=None):\n return 3.\n a = arraylike()\n ans = 
lnp.array(a)\n assert ans == 3.\n\n def testAllClose(self):\n rng = onp.random.RandomState(0)\n x = rng.randn(2, 2)\n y = rng.randn(2)\n\n def same(list1, list2):\n allclose = functools.partial(lnp.allclose, atol=1e-3, rtol=1e-3)\n elements_close = list(map(allclose, list1, list2))\n return lnp.all(lnp.array(elements_close))\n\n csame = api.jit(same)\n\n a1 = same((x, y), (x, y))\n a2 = csame((x, y), (x, y))\n a3 = csame((x, y), (x, 2 * y))\n\n self.assertTrue(a1)\n self.assertTrue(a2)\n self.assertFalse(a3)\n\n @jtu.skip_on_devices(\"tpu\") # TODO(mattjj): investigate this failure\n def testOnesBroadcastingConstantHandler(self):\n # TODO(mattjj): update this test for jax3\n self.skipTest(\"test needs jax3 update\")\n\n def fun(x):\n ones = lnp.ones((3, 4))\n assert isinstance(ones, onp.ndarray) and ones.strides == (0, 0)\n\n # To check that the constant handler generates a Broadcast for stride-zero\n # arrays, we monkey-patch the client instance.\n # TODO(mattjj): once we have better HLO dumping and inspecting facilities,\n # we can check the HLO more directly.\n c = x._node.c\n Broadcast = c.Broadcast # pylint: disable=invalid-name\n was_called = []\n c.Broadcast = lambda *args: was_called.append(True) or Broadcast(*args)\n out = x + ones # the ndarray constant handler should call Broadcast here\n assert was_called, \"Broadcast was not called.\"\n\n return out\n\n fun = api.jit(fun)\n out_val = fun(lnp.ones(4))\n self.assertAllClose(out_val, onp.full((3, 4), 2.), check_dtypes=False)\n\n def testZeroStridesConstantHandler(self):\n raw_const = onp.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)\n const = onp.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))\n\n def fun(x):\n return x * const\n\n fun = api.jit(fun)\n out_val = fun(3.)\n self.assertAllClose(out_val, 3. 
* const, check_dtypes=False)\n\n def testIsInstanceNdarrayDuringTracing(self):\n arr = onp.ones(3)\n\n @api.jit\n def f(x):\n self.assertIsInstance(x, lnp.ndarray)\n return lnp.sum(x)\n\n f(arr)\n\n\n def testNonArrayErrorMessage(self):\n x = [1., 2.]\n y = onp.array([3., 4.])\n\n def g(x, y):\n return lnp.add(x, y)\n\n def f(x, y):\n return lnp.dot(x, y)\n\n self.assertRaises(TypeError, lambda: g(x, y))\n self.assertRaises(TypeError, lambda: f(x, y))\n self.assertRaises(TypeError, lambda: api.jit(g)(x, y))\n self.assertRaises(TypeError, lambda: api.jit(f)(x, y))\n\n def testAbstractionErrorMessage(self):\n\n @api.jit\n def f(x, n):\n for _ in range(n):\n x = x * x\n return x\n\n self.assertRaises(TypeError, lambda: f(3., 3))\n\n @api.jit\n def g(x):\n if x > 0.:\n return x * 2\n else:\n return x + 2\n\n self.assertRaises(TypeError, lambda: g(3.))\n\n def testTracingPrimitiveWithNoTranslationErrorMessage(self):\n # TODO(mattjj): update this for jax3\n self.skipTest(\"test needs jax3 update\")\n foo = lnp._not_implemented(lambda x: x)\n\n # No error if there's no tracing.\n foo(onp.arange(3))\n\n cfoo = api.jit(foo)\n self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3)))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in [(3,), (2, 3)]\n for dtype in default_dtypes\n for axis in range(-len(shape), len(shape)) # Test negative axes\n for rng in [jtu.rand_default()]))\n def testFlip(self, shape, dtype, axis, rng):\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n lnp_op = lambda x: lnp.flip(x, axis)\n onp_op = lambda x: onp.flip(x, axis)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype}\n for shape in [(3,), (2, 3), (3, 2, 4)]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testFlipud(self, shape, dtype, rng):\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n lnp_op = lambda x: lnp.flipud(x)\n onp_op = lambda x: onp.flipud(x)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype}\n for shape in [(3, 2), (2, 3), (3, 2, 4)]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testFliplr(self, shape, dtype, rng):\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n lnp_op = lambda x: lnp.fliplr(x)\n onp_op = lambda x: onp.fliplr(x)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_k={}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k, axes),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"k\": k, \"axes\": axes}\n for shape, axes in [\n [(2, 3), (0, 1)],\n [(2, 3), (1, 0)],\n [(4, 3, 2), (0, 2)],\n [(4, 3, 2), (2, 1)],\n ]\n for k in range(-3, 4)\n for dtype in default_dtypes\n for rng in 
[jtu.rand_default()]))\n def testRot90(self, shape, dtype, k, axes, rng):\n args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n lnp_op = lambda x: lnp.rot90(x, k, axes)\n onp_op = lambda x: onp.rot90(x, k, axes)\n self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n # TODO(mattjj): test infix operator overrides\n\n def testRavel(self):\n rng = onp.random.RandomState(0)\n args_maker = lambda: [rng.randn(3, 4).astype(\"float32\")]\n self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True)\n\n def testAstype(self):\n rng = onp.random.RandomState(0)\n args_maker = lambda: [rng.randn(3, 4).astype(\"float32\")]\n op = lambda x: x.astype(lnp.int32)\n self._CheckAgainstNumpy(op, op, args_maker, check_dtypes=True)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n # TODO(mattjj): test other ndarray-like method overrides\n\n def testOnpMean(self):\n # from https://github.com/google/jax/issues/125\n x = lax.add(lnp.eye(3), 0.)\n ans = onp.mean(x)\n self.assertAllClose(ans, onp.array(1./3), check_dtypes=False)\n\n def testArangeOnFloats(self):\n # from https://github.com/google/jax/issues/145\n expected = onp.arange(0.0, 1.0, 0.1)\n ans = lnp.arange(0.0, 1.0, 0.1)\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n def testSortManually(self):\n # manual tests for sort are nice because we don't have to worry about ties.\n # lax.sort is tested combinatorially.\n ans = lnp.sort(onp.array([16, 15, 23, 42, 8, 4]))\n expected = onp.array([4, 8, 15, 16, 23, 42])\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n a = onp.array([[1, 4], [3, 1]])\n ans = lnp.sort(a, axis=None)\n expected = onp.array([1, 1, 3, 4])\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n a = onp.array([[1, 4], [3, 1]])\n ans = lnp.sort(a) # last axis\n expected = onp.array([[1, 4], [1, 3]])\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n a = onp.array([[1, 4], [3, 1]])\n ans = lnp.sort(a, axis=0)\n expected = onp.array([[1, 1], [3, 4]])\n self.assertAllClose(expected, ans, check_dtypes=True)\n\n def testArgsortManually(self):\n x = onp.array([16, 15, 23, 42, 8, 4])\n ans = lnp.argsort(x)\n expected = onp.argsort(x)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n x = onp.array([[16, 15, 23], [42, 8, 4]])\n ans = lnp.argsort(x, axis=0)\n expected = onp.argsort(x, axis=0)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n x = onp.array([[16, 15, 23], [42, 8, 4]])\n ans = lnp.argsort(x, axis=1)\n expected = onp.argsort(x, axis=1)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n x = onp.array([[16, 15, 23], [42, 8, 4]])\n ans = lnp.argsort(x, axis=None)\n expected = onp.argsort(x, axis=None)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n x = onp.array([[16, 15, 23], [42, 8, 4]])\n ans = lnp.argsort(x)\n expected = onp.argsort(x)\n self.assertAllClose(expected, ans, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_shifts={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n shifts, axis),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"shifts\": shifts,\n \"axis\": axis}\n for dtype in all_dtypes\n for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]\n for shifts, axis in [\n (3, None),\n (1, 1),\n ((3,), (0,)),\n ((-2,), (-2,)),\n ((1, 2), (0, -1))\n ]\n for rng in [jtu.rand_default()]))\n def testRoll(self, shape, dtype, shifts, axis, rng):\n args_maker = lambda: 
[rng(shape, dtype)]\n lnp_op = lambda x: lnp.roll(x, shifts, axis=axis)\n onp_op = lambda x: onp.roll(x, shifts, axis=axis)\n self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_index={}_axis={}_mode={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n jtu.format_shape_dtype_string(index_shape, index_dtype),\n axis, mode),\n \"rng\": rng, \"rng_indices\": rng_indices, \"shape\": shape,\n \"index_shape\": index_shape, \"dtype\": dtype, \"index_dtype\": index_dtype,\n \"axis\": axis, \"mode\": mode}\n for shape in [(3,), (3, 4), (3, 4, 5)]\n for index_shape in scalar_shapes + [(3,), (2, 1, 3)]\n for axis in itertools.chain(range(-len(shape), len(shape)), [None])\n for dtype in all_dtypes\n for index_dtype in int_dtypes\n for mode in ['wrap', 'clip']\n for rng in [jtu.rand_default()]\n for rng_indices in [jtu.rand_int(-5, 5)]))\n def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode, rng,\n rng_indices):\n def args_maker():\n x = rng(shape, dtype)\n i = rng_indices(index_shape, index_dtype)\n return x, i\n\n lnp_op = lambda x, i: lnp.take(x, i, axis=axis, mode=mode)\n onp_op = lambda x, i: onp.take(x, i, axis=axis, mode=mode)\n self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for shape in [(3,), (3, 4), (3, 4, 5)]\n for axis in itertools.chain(range(len(shape)), [-1], [None])\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testTakeAlongAxis(self, shape, dtype, axis, rng):\n def args_maker():\n x = rng(shape, dtype)\n i = onp.argsort(x, axis=axis)\n return x, i\n\n lnp_op = lambda x, i: lnp.take_along_axis(x, i, axis=axis)\n\n if hasattr(onp, \"take_along_axis\"):\n onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)\n self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_n={}_increasing={}\".format(\n jtu.format_shape_dtype_string([shape], dtype),\n n, increasing),\n \"dtype\": dtype, \"shape\": shape, \"n\": n, \"increasing\": increasing,\n \"rng\": jtu.rand_default()}\n for dtype in inexact_dtypes\n for shape in [0, 5]\n for n in [2, 4]\n for increasing in [False, True]))\n def testVander(self, shape, dtype, n, increasing, rng):\n onp_fun = lambda arg: onp.vander(arg, N=n, increasing=increasing)\n lnp_fun = lambda arg: lnp.vander(arg, N=n, increasing=increasing)\n args_maker = lambda: [rng([shape], dtype)]\n # np.vander seems to return float64 for all floating types. 
We could obey\n # those semantics, but they seem like a bug.\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"nan_to_num\", [shape],\n [dtype]),\n \"rng\": jtu.rand_some_inf_and_nan(), \"shape\": shape, \"dtype\": dtype}\n for shape in all_shapes\n for dtype in inexact_dtypes))\n def testNanToNum(self, rng, shape, dtype):\n dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type\n args_maker = lambda: [rng(shape, dtype)]\n self._CheckAgainstNumpy(onp.nan_to_num, lnp.nan_to_num, args_maker,\n check_dtypes=True)\n self._CompileAndCheck(lnp.nan_to_num, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"ix_\", shapes, dtypes),\n \"rng\": jtu.rand_default(), \"shapes\": shapes, \"dtypes\": dtypes}\n for shapes, dtypes in (\n ((), ()),\n (((7,),), (onp.float32,)),\n (((3,), (4,)), (onp.float32, onp.int32)),\n (((3,), (0,), (4,)), (onp.int32, onp.float32, onp.int32)),\n )))\n def testIx_(self, rng, shapes, dtypes):\n args_maker = lambda: [rng(shape, dtype)\n for shape, dtype in zip(shapes, dtypes)]\n self._CheckAgainstNumpy(onp.ix_, lnp.ix_, args_maker,\n check_dtypes=True)\n self._CompileAndCheck(lnp.ix_, args_maker, check_dtypes=True)\n\n def testIssue330(self):\n x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash\n self.assertEqual(x[0, 0], 1)\n\n def testScalarDtypePromotion(self):\n # disabled this test after https://github.com/google/jax/issues/732\n msg = (\"jax.numpy differs from numpy in promotion rules for Python scalars.\"\n \" See https://github.com/google/jax/issues/732.\")\n raise SkipTest(msg)\n orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype\n jax_numpy_result = (1 + lnp.eye(1, dtype=lnp.float32)).dtype\n self.assertEqual(orig_numpy_result, jax_numpy_result)\n\n def testSymmetrizeDtypePromotion(self):\n x = onp.eye(3, dtype=onp.float32)\n orig_numpy_result = ((x + x.T) / 2).dtype\n\n x = lnp.eye(3, dtype=lnp.float32)\n jax_numpy_result = ((x + x.T) / 2).dtype\n self.assertEqual(orig_numpy_result, jax_numpy_result)\n\n def testIssue347(self):\n # https://github.com/google/jax/issues/347\n def test_fail(x):\n x = lnp.sqrt(lnp.sum(x ** 2, axis=1))\n ones = lnp.ones_like(x)\n x = lnp.where(x > 0.5, x, ones)\n return lnp.sum(x)\n\n x = lnp.array([[1, 2], [3, 4], [0, 0]], dtype=lnp.float64)\n result = api.grad(test_fail)(x)\n assert not onp.any(onp.isnan(result))\n\n def testIssue453(self):\n # https://github.com/google/jax/issues/453\n a = onp.arange(6) + 1\n ans = lnp.reshape(a, (3, 2), order='F')\n expected = onp.reshape(a, (3, 2), order='F')\n self.assertAllClose(ans, expected, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_dtype={}\".format(\n op, {bool: \"bool\", int: \"int\", float: \"float\"}[dtype]),\n \"dtype\": dtype, \"op\": op}\n for dtype in [int, float, bool]\n for op in [\"atleast_1d\", \"atleast_2d\", \"atleast_3d\"]))\n def testAtLeastNdLiterals(self, dtype, op):\n # Fixes: https://github.com/google/jax/issues/634\n onp_fun = lambda arg: getattr(onp, op)(arg)\n lnp_fun = lambda arg: getattr(lnp, op)(arg)\n args_maker = lambda: [dtype(2)]\n self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n\n\n def 
testLongLong(self):\n self.assertAllClose(onp.int64(7), api.jit(lambda x: x)(onp.longlong(7)),\n check_dtypes=True)\n\n def testArange(self):\n # test cases inspired by dask tests at\n # https://github.com/dask/dask/blob/master/dask/array/tests/test_creation.py#L92\n self.assertAllClose(lnp.arange(77),\n onp.arange(77), check_dtypes=True)\n self.assertAllClose(lnp.arange(2, 13),\n onp.arange(2, 13), check_dtypes=True)\n self.assertAllClose(lnp.arange(4, 21, 9),\n onp.arange(4, 21, 9), check_dtypes=True)\n self.assertAllClose(lnp.arange(53, 5, -3),\n onp.arange(53, 5, -3), check_dtypes=True)\n # TODO(mattjj): make these tests work when jax_enable_x64=True\n # self.assertAllClose(lnp.arange(77, dtype=float),\n # onp.arange(77, dtype=float), check_dtypes=True)\n # self.assertAllClose(lnp.arange(2, 13, dtype=int),\n # onp.arange(2, 13, dtype=int), check_dtypes=True)\n self.assertAllClose(lnp.arange(0, 1, -0.5),\n onp.arange(0, 1, -0.5), check_dtypes=True)\n\n self.assertRaises(TypeError, lambda: lnp.arange())\n\n # test that lnp.arange(N) doesn't instantiate an ndarray\n self.assertFalse(type(lnp.arange(77)) == type(onp.arange(77)))\n self.assertTrue(type(lnp.arange(77)) == type(lax.iota(onp.int32, 77)))\n\n def testIssue830(self):\n a = lnp.arange(4, dtype=lnp.complex64)\n self.assertEqual(a.dtype, lnp.complex64)\n\n def testIssue728(self):\n assert lnp.allclose(lnp.eye(5000), onp.eye(5000))\n self.assertEqual(0, onp.sum(lnp.eye(1050) - onp.eye(1050)))\n\n def testIssue746(self):\n lnp.arange(12).reshape(3, 4) # doesn't crash\n\n def testIssue764(self):\n x = lnp.linspace(190, 200, 4)\n f = api.grad(lambda x: lnp.sum(lnp.tanh(x)))\n # Expected values computed with autograd in float64 precision.\n expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,\n 7.66067839e-174], onp.float64)\n self.assertAllClose(f(x), expected, check_dtypes=False)\n\n def testIssue776(self):\n \"\"\"Tests that the scatter-add transpose rule instantiates symbolic zeros.\"\"\"\n def f(u):\n y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u)\n # The transpose rule for lax.tie_in returns a symbolic zero for its first\n # argument.\n return lax.tie_in(y, 7.)\n\n self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)),\n check_dtypes=True)\n\n @parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(op, [()], [dtype]),\n \"dtype\": dtype, \"op\": op}\n for dtype in float_dtypes\n for op in (\"sqrt\", \"arccos\", \"arcsin\", \"arctan\", \"sin\", \"cos\", \"tan\",\n \"sinh\", \"cosh\", \"tanh\", \"arccosh\", \"arcsinh\", \"arctanh\", \"exp\",\n \"log\", \"expm1\", \"log1p\")))\n def testMathSpecialFloatValues(self, op, dtype):\n onp_op = getattr(onp, op)\n lnp_op = getattr(lnp, op)\n dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type\n for x in (onp.nan, -onp.inf, -100., -2. 
-1., 0., 1., 2., 100., onp.inf,\n onp.finfo(dtype).max, onp.sqrt(onp.finfo(dtype).max),\n onp.sqrt(onp.finfo(dtype).max) * 2.):\n if onp.isnan(x) and op in (\"cosh\", \"expm1\", \"exp\"):\n # TODO(b/133842876, b/133842870): these return wrong outputs on CPU for\n # NaN inputs.\n continue\n if (op in (\"sin\", \"cos\", \"tan\", \"arctan\") and FLAGS.jax_test_dut and\n FLAGS.jax_test_dut.startswith(\"tpu\")):\n continue # TODO(b/132196789, b/134175194): fix and reenable.\n x = dtype(x)\n expected = onp_op(x)\n actual = lnp_op(x)\n self.assertAllClose(expected, actual, check_dtypes=True)\n\n def testIssue883(self):\n # from https://github.com/google/jax/issues/883\n\n @partial(api.jit, static_argnums=(1,))\n def f(x, v):\n return x\n\n x = lnp.ones((10, 10))\n v = lnp.array([1, 2, 3])\n first_call = f(x, v)\n second_call = f(x, v) # doesn't crash\n\n def testReductionOfOutOfBoundsAxis(self): # Issue 888\n x = lnp.ones((3, 4))\n self.assertRaises(ValueError, lambda: lnp.sum(x, axis=2))\n\n\nif __name__ == \"__main__\":\n absltest.main()\n" ]
[ [ "numpy.ones", "numpy.version.version.split", "numpy.diag", "numpy.take", "numpy.dtype", "numpy.issubdtype", "numpy.argsort", "numpy.cross", "numpy.vander", "numpy.int64", "numpy.random.RandomState", "numpy.trace", "numpy.diagonal", "numpy.full_like", "numpy.append", "numpy.concatenate", "numpy.reshape", "numpy.fliplr", "numpy.tensordot", "numpy.take_along_axis", "numpy.expand_dims", "numpy.isnan", "numpy.identity", "numpy.average", "numpy.mean", "numpy.longlong", "numpy.eye", "numpy.tile", "numpy.tri", "numpy.zeros", "numpy.repeat", "numpy.count_nonzero", "numpy.arange", "numpy.broadcast_to", "numpy.pad", "numpy.finfo", "numpy.array", "numpy.roll", "numpy.flipud", "numpy.squeeze", "numpy.swapaxes", "numpy.iinfo", "numpy.clip", "numpy.rot90", "numpy.flip", "numpy.round", "numpy.inner", "numpy.full", "numpy.split" ] ]
DiogoRibeiro7/Machine-Learning
[ "d2c789851f8b4eaf74cdd0c18af072f60cd45cb3" ]
[ "Ensemble Learning/AdaBoost.py" ]
[ "\"\"\"\n@Filename: AdaptiveBoost.py\n@Author: Diogo Ribeiro\n@Create Date: 2019-05-03\n@Update Date: 2019-05-03\n@Description: Implement of Adaptive Boosting\n\"\"\"\n\nimport numpy as np\nimport preProcess\nimport pickle\nimport random\nimport SVM\nimport math\n\nclass Adaboost:\n def __init__(self, norm_type=\"Normalization\", iterations=5, base_classifier=\"SVM\"):\n self.iterations = iterations\n self.norm_type = norm_type\n self.base_classifier = SVM.SVMClassifier()\n self.prediction = None\n self.probability = None\n self.classifier_set = None\n\n '''\n Function: baseClassifier\n Description: generate weak classifier\n Input: train_data dataType: ndarray description: train_data\n train_label dataType: ndarray description: train_label\n w dataType: ndarray description: weight\n Output: clf dataType: object description: weak classifier\n weighted_error dataType: float description: weighted error\n base_predictions dataType: object description: base predictions\n\n '''\n def baseClassifier(self, train_data, train_label, w):\n sample_num = len(train_data)\n error_index = np.ones([sample_num, 1])\n clf = self.base_classifier\n clf.train(train_data, train_label)\n base_predictions = np.sign(clf.predict(train_data))\n\n for i in range(sample_num):\n if base_predictions[i] == train_label[i]:\n error_index[i] = 0\n weighted_error = np.dot(w.T, error_index)\n return clf, weighted_error, base_predictions\n\n '''\n Function: updataAlpha\n Description: updata alpha\n Input: error dataType: float description: weighted error\n Output: new_alpha dataType: float description: new alpha\n '''\n def updateAlpha(self, error):\n temp = (1.0 - error)/max(error, 10e-6)\n new_alpha = 1/2 * math.log(temp, math.e)\n return new_alpha\n\n '''\n Function: train\n Description: train the model\n Input: train_data dataType: ndarray description: features\n train_label dataType: ndarray description: labels\n Output: clf_set dataType: list description: classifiers set\n '''\n def train(self, train_data, train_label):\n if self.norm_type == \"Standardization\":\n train_data = preProcess.Standardization(train_data)\n else:\n train_data = preProcess.Normalization(train_data)\n\n train_label = np.expand_dims(train_label, axis=1)\n sample_num = len(train_data)\n\n weak_classifier = []\n\n # initialize weights\n w = np.ones([sample_num, 1])\n w = w/sample_num\n\n # predictions\n agg_predicts = np.zeros([sample_num, 1]) # aggregate value of prediction\n\n # start train\n for i in range(self.iterations):\n base_clf, error, base_prediction = self.baseClassifier(train_data, train_label, w)\n alpha = self.updateAlpha(error)\n weak_classifier.append((alpha, base_clf))\n\n # update parameters in page of 139 Eq.(8.4)\n expon = np.multiply(-1 * alpha * train_label, base_prediction)\n w = np.multiply(w, np.exp(expon))\n w = w/w.sum()\n\n # calculate the total error rate\n agg_predicts += alpha*base_prediction\n error_rate = np.multiply(np.sign(agg_predicts) != train_label, np.ones([sample_num, 1]))\n error_rate = error_rate.sum()/sample_num\n\n if error_rate == 0:\n break\n self.classifier_set = weak_classifier\n return weak_classifier\n\n\n '''\n Function: predict\n Description: predict the testing set\n Input: train_data dataType: ndarray description: features\n prob dataType: bool description: return probaility of label\n Output: prediction dataType: ndarray description: the prediction results for testing set\n '''\n\n def predict(self, test_data, prob=\"False\"):\n # Normalization\n if self.norm_type == \"Standardization\":\n 
test_data = preProcess.Standardization(test_data)\n else:\n test_data = preProcess.Normalization(test_data)\n\n test_num = test_data.shape[0]\n prediction = np.zeros([test_num, 1])\n probability = np.zeros([test_num, 1])\n\n for classifier in self.classifier_set:\n alpha = classifier[0]\n clf = classifier[1]\n base_prediction = alpha * clf.predict(test_data)\n probability += base_prediction\n\n self.prediction = np.sign(probability)\n self.probability = probability\n if prob:\n return probability\n else:\n return prediction\n\n\n '''\n Function: accuracy\n Description: show detection result\n Input: test_label dataType: ndarray description: labels of test data\n Output: accuracy dataType: float description: detection accuarcy\n '''\n def accuarcy(self, test_label):\n test_label = np.expand_dims(test_label, axis=1)\n prediction = self.prediction\n accuarcy = sum(prediction == test_label)/len(test_label)\n return accuarcy\n\n\n '''\n Function: save\n Description: save the model as pkl\n Input: filename dataType: str description: the path to save model\n '''\n def save(self, filename):\n f = open(filename, 'w')\n pickle.dump(self.classifier_set, f)\n f.close()\n\n '''\n Function: load\n Description: load the model\n Input: filename dataType: str description: the path to save model\n Output: self dataType: obj description: the trained model\n '''\n def load(self, filename):\n f = open(filename)\n self.classifier_set = pickle.load(f)\n return self\n" ]
[ [ "numpy.ones", "numpy.multiply", "numpy.sign", "numpy.zeros", "numpy.exp", "numpy.expand_dims", "numpy.dot" ] ]
Sebastianvarv/rl-homework
[ "b7526ac3c86cbaae6b796856c31fc4c671a32663" ]
[ "hw1/run_expert.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nCode to load an expert policy and generate roll-out data for behavioral cloning.\nExample usage:\n python run_expert.py experts/Humanoid-v1.pkl Humanoid-v1 --render \\\n --num_rollouts 20\n\nAuthor of this script and included expert policies: Jonathan Ho ([email protected])\n\"\"\"\n\nimport os\nimport pickle\nimport tensorflow as tf\nimport numpy as np\nimport tf_util\nimport gym\nimport load_policy\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('expert_policy_file', type=str)\n parser.add_argument('envname', type=str)\n parser.add_argument('--render', action='store_true')\n parser.add_argument(\"--max_timesteps\", type=int)\n parser.add_argument('--num_rollouts', type=int, default=20,\n help='Number of expert roll outs')\n args = parser.parse_args()\n\n print('loading and building expert policy')\n policy_fn = load_policy.load_policy(args.expert_policy_file)\n print('loaded and built')\n\n with tf.Session():\n tf_util.initialize()\n\n import gym\n env = gym.make(args.envname)\n max_steps = args.max_timesteps or env.spec.timestep_limit\n\n returns = []\n observations = []\n actions = []\n for i in range(args.num_rollouts):\n print('iter', i)\n obs = env.reset()\n done = False\n totalr = 0.\n steps = 0\n while not done:\n action = policy_fn(obs[None,:])\n observations.append(obs)\n actions.append(action)\n obs, r, done, _ = env.step(action)\n totalr += r\n steps += 1\n if args.render:\n env.render()\n if steps % 100 == 0: print(\"%i/%i\"%(steps, max_steps))\n if steps >= max_steps:\n break\n returns.append(totalr)\n\n print('returns', returns)\n print('mean return', np.mean(returns))\n print('std of return', np.std(returns))\n\n expert_data = {'observations': np.array(observations),\n 'actions': np.array(actions)}\n #\n # with open(os.path.join('expert_data', args.envname + '.pkl'), 'wb') as f:\n # pickle.dump(expert_data, f\n # , pickle.HIGHEST_PROTOCOL)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.mean", "numpy.std", "numpy.array", "tensorflow.Session" ] ]
mmiller-max/clearml
[ "fd2d6c6f5d46cad3e406e88eeb4d805455b5b3d8" ]
[ "clearml/storage/helper.py" ]
[ "from __future__ import with_statement\n\nimport errno\nimport getpass\nimport itertools\nimport json\nimport os\nimport shutil\nimport sys\nimport threading\nfrom abc import ABCMeta, abstractmethod\nfrom collections import namedtuple\nfrom concurrent.futures import ThreadPoolExecutor\nfrom copy import copy\nfrom datetime import datetime\nfrom multiprocessing.pool import ThreadPool\nfrom tempfile import mktemp\nfrom time import time\nfrom types import GeneratorType\n\nimport requests\nimport six\nfrom _socket import gethostname\nfrom attr import attrs, attrib, asdict\nfrom furl import furl\nfrom pathlib2 import Path\nfrom requests.exceptions import ConnectionError\nfrom six import binary_type, StringIO\nfrom six.moves.queue import Queue, Empty\nfrom six.moves.urllib.parse import urlparse\nfrom six.moves.urllib.request import url2pathname\n\nfrom .callbacks import UploadProgressReport, DownloadProgressReport\nfrom .util import quote_url\nfrom ..backend_api.utils import get_http_session_with_retry\nfrom ..backend_config.bucket_config import S3BucketConfigurations, GSBucketConfigurations, AzureContainerConfigurations\nfrom ..config import config, deferred_config\nfrom ..debugging import get_logger\nfrom ..errors import UsageError\n\n\nclass StorageError(Exception):\n pass\n\n\nclass DownloadError(Exception):\n pass\n\n\[email protected]_metaclass(ABCMeta)\nclass _Driver(object):\n\n @classmethod\n def get_logger(cls):\n return get_logger('storage')\n\n @abstractmethod\n def get_container(self, container_name, config=None, **kwargs):\n pass\n\n @abstractmethod\n def test_upload(self, test_path, config, **kwargs):\n pass\n\n @abstractmethod\n def upload_object_via_stream(self, iterator, container, object_name, extra, **kwargs):\n pass\n\n @abstractmethod\n def list_container_objects(self, container, ex_prefix, **kwargs):\n pass\n\n @abstractmethod\n def get_direct_access(self, remote_path, **kwargs):\n pass\n\n @abstractmethod\n def download_object(self, obj, local_path, overwrite_existing, delete_on_failure, callback, **kwargs):\n pass\n\n @abstractmethod\n def download_object_as_stream(self, obj, chunk_size, **kwargs):\n pass\n\n @abstractmethod\n def delete_object(self, obj, **kwargs):\n pass\n\n @abstractmethod\n def upload_object(self, file_path, container, object_name, extra, **kwargs):\n pass\n\n @abstractmethod\n def get_object(self, container_name, object_name, **kwargs):\n pass\n\n\nclass StorageHelper(object):\n \"\"\" Storage helper.\n Used by the entire system to download/upload files.\n Supports both local and remote files (currently local files, network-mapped files, HTTP/S and Amazon S3)\n \"\"\"\n _temp_download_suffix = '.partially'\n\n @classmethod\n def _get_logger(cls):\n return get_logger('storage')\n\n @attrs\n class _PathSubstitutionRule(object):\n registered_prefix = attrib(type=str)\n local_prefix = attrib(type=str)\n replace_windows_sep = attrib(type=bool)\n replace_linux_sep = attrib(type=bool)\n\n path_substitution_config = 'storage.path_substitution'\n\n @classmethod\n def load_list_from_config(cls):\n rules_list = []\n for index, sub_config in enumerate(config.get(cls.path_substitution_config, list())):\n rule = cls(\n registered_prefix=sub_config.get('registered_prefix', None),\n local_prefix=sub_config.get('local_prefix', None),\n replace_windows_sep=sub_config.get('replace_windows_sep', False),\n replace_linux_sep=sub_config.get('replace_linux_sep', False),\n )\n\n if any(prefix is None for prefix in (rule.registered_prefix, rule.local_prefix)):\n 
StorageHelper._get_logger().warning(\n \"Illegal substitution rule configuration '{}[{}]': {}\".format(\n cls.path_substitution_config,\n index,\n asdict(rule),\n ))\n\n continue\n\n if all((rule.replace_windows_sep, rule.replace_linux_sep)):\n StorageHelper._get_logger().warning(\n \"Only one of replace_windows_sep and replace_linux_sep flags may be set.\"\n \"'{}[{}]': {}\".format(\n cls.path_substitution_config,\n index,\n asdict(rule),\n ))\n continue\n\n rules_list.append(rule)\n\n return rules_list\n\n class _UploadData(object):\n @property\n def src_path(self):\n return self._src_path\n\n @property\n def dest_path(self):\n return self._dest_path\n\n @property\n def extra(self):\n return self._extra\n\n @property\n def callback(self):\n return self._callback\n\n @property\n def retries(self):\n return self._retries\n\n def __init__(self, src_path, dest_path, extra, callback, retries):\n self._src_path = src_path\n self._dest_path = dest_path\n self._extra = extra\n self._callback = callback\n self._retries = retries\n\n def __str__(self):\n return \"src=%s\" % self.src_path\n\n _helpers = {} # cache of helper instances\n\n # global terminate event for async upload threads\n _terminate = threading.Event()\n _async_upload_threads = set()\n _upload_pool = None\n\n # collect all bucket credentials that aren't empty (ignore entries with an empty key or secret)\n _s3_configurations = deferred_config('aws.s3', {}, transform=S3BucketConfigurations.from_config)\n _gs_configurations = deferred_config('google.storage', {}, transform=GSBucketConfigurations.from_config)\n _azure_configurations = deferred_config('azure.storage', {}, transform=AzureContainerConfigurations.from_config)\n _path_substitutions = deferred_config(transform=_PathSubstitutionRule.load_list_from_config)\n\n @property\n def log(self):\n return self._log\n\n @property\n def scheme(self):\n return self._scheme\n\n @property\n def secure(self):\n return self._secure\n\n @property\n def base_url(self):\n return self._base_url\n\n @classmethod\n def get(cls, url, logger=None, **kwargs):\n \"\"\"\n Get a storage helper instance for the given URL\n\n :return: A StorageHelper instance.\n \"\"\"\n\n # Handle URL substitution etc before locating the correct storage driver\n url = cls._canonize_url(url)\n\n # Get the credentials we should use for this url\n base_url = cls._resolve_base_url(url)\n\n instance_key = '%s_%s' % (base_url, threading.current_thread().ident or 0)\n\n force_create = kwargs.pop('__force_create', False)\n if (instance_key in cls._helpers) and (not force_create):\n return cls._helpers[instance_key]\n\n # Don't canonize URL since we already did it\n try:\n instance = cls(base_url=base_url, url=url, logger=logger, canonize_url=False, **kwargs)\n except (StorageError, UsageError) as ex:\n cls._get_logger().error(str(ex))\n return None\n except Exception as ex:\n cls._get_logger().error(\"Failed creating storage object {} Reason: {}\".format(\n base_url or url, ex))\n return None\n\n cls._helpers[instance_key] = instance\n return instance\n\n @classmethod\n def get_local_copy(cls, remote_url):\n \"\"\"\n Download a file from remote URL to a local storage, and return path to local copy,\n\n :param remote_url: Remote URL. Example: https://example.com/file.jpg s3://bucket/folder/file.mp4 etc.\n :return: Path to local copy of the downloaded file. None if error occurred.\n \"\"\"\n helper = cls.get(remote_url)\n if not helper:\n return None\n # create temp file with the requested file name\n file_name = '.' 
+ remote_url.split('/')[-1].split(os.path.sep)[-1]\n local_path = mktemp(suffix=file_name)\n return helper.download_to_file(remote_url, local_path)\n\n def __init__(self, base_url, url, key=None, secret=None, region=None, verbose=False, logger=None, retries=5,\n **kwargs):\n level = config.get('storage.log.level', None)\n\n if level:\n try:\n self._get_logger().setLevel(level)\n except (TypeError, ValueError):\n self._get_logger().error('invalid storage log level in configuration: %s' % level)\n\n self._log = logger or self._get_logger()\n self._verbose = verbose\n self._retries = retries\n self._extra = {}\n self._base_url = base_url\n self._secure = True\n self._driver = None\n self._container = None\n self._conf = None\n\n if kwargs.get('canonize_url', True):\n url = self._canonize_url(url)\n\n parsed = urlparse(url)\n self._scheme = parsed.scheme\n\n if self._scheme == _AzureBlobServiceStorageDriver.scheme:\n self._conf = copy(self._azure_configurations.get_config_by_uri(url))\n if self._conf is None:\n raise StorageError(\"Missing Azure Blob Storage configuration for {}\".format(url))\n\n if not self._conf.account_name or not self._conf.account_key:\n raise StorageError(\n \"Missing account name or key for Azure Blob Storage access for {}\".format(base_url)\n )\n\n self._driver = _AzureBlobServiceStorageDriver()\n self._container = self._driver.get_container(config=self._conf)\n\n elif self._scheme == _Boto3Driver.scheme:\n self._conf = copy(self._s3_configurations.get_config_by_uri(url))\n self._secure = self._conf.secure\n\n final_region = region if region else self._conf.region\n if not final_region:\n final_region = None\n\n self._conf.update(\n key=key or self._conf.key,\n secret=secret or self._conf.secret,\n multipart=self._conf.multipart,\n region=final_region,\n use_credentials_chain=self._conf.use_credentials_chain\n )\n\n if not self._conf.use_credentials_chain:\n if not self._conf.key or not self._conf.secret:\n raise ValueError(\n \"Missing key and secret for S3 storage access (%s)\" % base_url\n )\n\n self._driver = _Boto3Driver()\n self._container = self._driver.get_container(container_name=self._base_url, retries=retries,\n config=self._conf)\n\n elif self._scheme == _GoogleCloudStorageDriver.scheme:\n self._conf = copy(self._gs_configurations.get_config_by_uri(url))\n self._driver = _GoogleCloudStorageDriver()\n self._container = self._driver.get_container(\n container_name=self._base_url,\n config=self._conf\n )\n\n elif self._scheme in _HttpDriver.schemes:\n self._driver = _HttpDriver(retries=retries)\n self._container = self._driver.get_container(container_name=self._base_url)\n else: # elif self._scheme == 'file':\n # if this is not a known scheme assume local file\n\n # If the scheme is file, use only the path segment, If not, use the entire URL\n if self._scheme == 'file':\n url = parsed.path\n\n url = url.replace(\"\\\\\", \"/\")\n\n # url2pathname is specifically intended to operate on (urlparse result).path\n # and returns a cross-platform compatible result\n driver_uri = url2pathname(url)\n path_driver_uri = Path(driver_uri)\n # if path_driver_uri.is_file():\n # driver_uri = str(path_driver_uri.parent)\n # elif not path_driver_uri.exists():\n # # assume a folder and create\n # # Path(driver_uri).mkdir(parents=True, exist_ok=True)\n # pass\n\n self._driver = _FileStorageDriver(str(path_driver_uri.root))\n self._container = None\n\n @classmethod\n def terminate_uploads(cls, force=True, timeout=2.0):\n if force:\n # since async uploaders are daemon threads, we 
can just return and let them close by themselves\n return\n # signal all threads to terminate and give them a chance for 'timeout' seconds (total, not per-thread)\n cls._terminate.set()\n remaining_timeout = timeout\n for thread in cls._async_upload_threads:\n t = time()\n try:\n thread.join(timeout=remaining_timeout)\n except Exception:\n pass\n remaining_timeout -= (time() - t)\n\n @classmethod\n def get_configuration(cls, bucket_config):\n return cls._s3_configurations.get_config_by_bucket(bucket_config.bucket, bucket_config.host)\n\n @classmethod\n def add_configuration(cls, bucket_config, log=None, _test_config=True):\n # Try to use existing configuration if we have no key and secret\n use_existing = not bucket_config.is_valid()\n\n # Get existing config anyway (we'll either try to use it or alert we're replacing it\n existing = cls.get_configuration(bucket_config)\n\n configs = cls._s3_configurations\n\n if not use_existing:\n # Test bucket config, fails if unsuccessful\n if _test_config:\n _Boto3Driver._test_bucket_config(bucket_config, log)\n\n if existing:\n if log:\n log.warning('Overriding existing configuration for %s/%s'\n % (existing.host or 'AWS', existing.bucket))\n configs.remove_config(existing)\n else:\n # Try to use existing configuration\n good_config = False\n if existing:\n if log:\n log.info('Using existing credentials for bucket %s/%s'\n % (bucket_config.host or 'AWS', bucket_config.bucket))\n good_config = _Boto3Driver._test_bucket_config(existing, log, raise_on_error=False)\n\n if not good_config:\n # Try to use global key/secret\n configs.update_config_with_defaults(bucket_config)\n\n if log:\n log.info('Using global credentials for bucket %s/%s'\n % (bucket_config.host or 'AWS', bucket_config.bucket))\n if _test_config:\n _Boto3Driver._test_bucket_config(bucket_config, log)\n else:\n # do not add anything, existing config is OK\n return\n\n configs.add_config(bucket_config)\n\n @classmethod\n def add_path_substitution(\n cls,\n registered_prefix,\n local_prefix,\n replace_windows_sep=False,\n replace_linux_sep=False,\n ):\n \"\"\"\n Add a path substitution rule for storage paths.\n\n Useful for case where the data was registered under some path, and that\n path was later renamed. This may happen with local storage paths where\n each machine is has different mounts or network drives configurations\n\n :param registered_prefix: The prefix to search for and replace. This is\n the prefix of the path the data is registered under. This should be the\n exact url prefix, case sensitive, as the data is registered.\n :param local_prefix: The prefix to replace 'registered_prefix' with. This\n is the prefix of the path the data is actually saved under. 
This should be the\n exact url prefix, case sensitive, as the data is saved under.\n :param replace_windows_sep: If set to True, and the prefix matches, the rest\n of the url has all of the windows path separators (backslash '\\') replaced with\n the native os path separator.\n :param replace_linux_sep: If set to True, and the prefix matches, the rest\n of the url has all of the linux/unix path separators (slash '/') replaced with\n the native os path separator.\n \"\"\"\n\n if not registered_prefix or not local_prefix:\n raise UsageError(\"Path substitution prefixes must be non empty strings\")\n\n if replace_windows_sep and replace_linux_sep:\n raise UsageError(\"Only one of replace_windows_sep and replace_linux_sep may be set.\")\n\n rule = cls._PathSubstitutionRule(\n registered_prefix=registered_prefix,\n local_prefix=local_prefix,\n replace_windows_sep=replace_windows_sep,\n replace_linux_sep=replace_linux_sep,\n )\n\n cls._path_substitutions.append(rule)\n\n @classmethod\n def clear_path_substitutions(cls):\n \"\"\"\n Removes all path substitution rules, including ones from the configuration file.\n \"\"\"\n cls._path_substitutions = list()\n\n def verify_upload(self, folder_uri='', raise_on_error=True, log_on_error=True):\n \"\"\"\n Verify that this helper can upload files to a folder.\n\n An upload is possible iff:\n 1. the destination folder is under the base uri of the url used to create the helper\n 2. the helper has credentials to write to the destination folder\n\n :param folder_uri: The destination folder to test. Must be an absolute\n url that begins with the base uri of the url used to create the helper.\n :param raise_on_error: Raise an exception if an upload is not possible\n :param log_on_error: Log an error if an upload is not possible\n :return: True, if, and only if, an upload to folder_uri is possible.\n \"\"\"\n\n folder_uri = self._canonize_url(folder_uri)\n\n folder_uri = self.conform_url(folder_uri, self._base_url)\n\n test_path = self._normalize_object_name(folder_uri)\n\n if self._scheme == _Boto3Driver.scheme:\n _Boto3Driver._test_bucket_config(\n self._conf,\n self._log,\n test_path=test_path,\n raise_on_error=raise_on_error,\n log_on_error=log_on_error,\n )\n elif self._scheme == _GoogleCloudStorageDriver.scheme:\n self._driver.test_upload(test_path, self._conf)\n\n elif self._scheme == 'file':\n # Check path exists\n Path(test_path).mkdir(parents=True, exist_ok=True)\n # check path permissions\n Path(test_path).touch(exist_ok=True)\n\n return folder_uri\n\n def upload_from_stream(self, stream, dest_path, extra=None, retries=1):\n dest_path = self._canonize_url(dest_path)\n object_name = self._normalize_object_name(dest_path)\n extra = extra.copy() if extra else {}\n extra.update(self._extra)\n last_ex = None\n cb = UploadProgressReport.from_stream(stream, object_name, self._verbose, self._log)\n for i in range(max(1, retries)):\n try:\n self._driver.upload_object_via_stream(\n iterator=stream,\n container=self._container,\n object_name=object_name,\n callback=cb,\n extra=extra)\n last_ex = None\n break\n except Exception as ex:\n last_ex = ex\n # seek to beginning if possible\n # noinspection PyBroadException\n try:\n stream.seek(0)\n except Exception:\n pass\n if last_ex:\n raise last_ex\n\n if self.scheme in _HttpDriver.schemes:\n # quote link\n dest_path = quote_url(dest_path)\n\n return dest_path\n\n def upload(self, src_path, dest_path=None, extra=None, async_enable=False, cb=None, retries=1):\n if not dest_path:\n dest_path = 
os.path.basename(src_path)\n\n dest_path = self._canonize_url(dest_path)\n\n if cb and self.scheme in _HttpDriver.schemes:\n # store original callback\n a_cb = cb\n\n # quote link\n def callback(a_path):\n return a_cb(quote_url(a_path) if a_path else a_path)\n # replace callback with wrapper\n cb = callback\n\n if async_enable:\n data = self._UploadData(src_path=src_path, dest_path=dest_path, extra=extra, callback=cb, retries=retries)\n StorageHelper._initialize_upload_pool()\n return StorageHelper._upload_pool.apply_async(self._do_async_upload, args=(data,))\n else:\n res = self._do_upload(src_path, dest_path, extra, cb, verbose=False, retries=retries)\n if res:\n res = quote_url(res)\n return res\n\n def list(self, prefix=None):\n \"\"\"\n List entries in the helper base path.\n\n Return a list of names inside this helper base path. The base path is\n determined at creation time and is specific for each storage medium.\n For Google Storage and S3 it is the bucket of the path.\n For local files it is the root directory.\n\n This operation is not supported for http and https protocols.\n\n :param prefix: If None, return the list as described above. If not, it\n must be a string - the path of a sub directory under the base path.\n the returned list will include only objects under that subdir.\n\n :return: The paths of all the objects in the storage base\n path under prefix. Listed relative to the base path.\n\n \"\"\"\n\n if prefix:\n if prefix.startswith(self._base_url):\n prefix = prefix[len(self.base_url):].lstrip(\"/\")\n\n try:\n res = self._driver.list_container_objects(self._container, ex_prefix=prefix)\n except TypeError:\n res = self._driver.list_container_objects(self._container)\n\n return [\n obj.name\n for obj in res if\n obj.name.startswith(prefix) and obj.name != prefix\n ]\n else:\n return [obj.name for obj in self._driver.list_container_objects(self._container)]\n\n def download_to_file(self, remote_path, local_path, overwrite_existing=False, delete_on_failure=True, verbose=None):\n def next_chunk(astream):\n if isinstance(astream, binary_type):\n chunk = astream\n astream = None\n elif astream:\n try:\n chunk = next(astream)\n except StopIteration:\n chunk = None\n else:\n chunk = None\n return chunk, astream\n\n remote_path = self._canonize_url(remote_path)\n verbose = self._verbose if verbose is None else verbose\n\n # Check if driver type supports direct access:\n direct_access_path = self._driver.get_direct_access(remote_path)\n if direct_access_path:\n return direct_access_path\n\n temp_local_path = None\n try:\n if verbose:\n self._log.info('Start downloading from %s' % remote_path)\n if not overwrite_existing and Path(local_path).is_file():\n self._log.warning(\n 'File {} already exists, no need to download, thread id = {}'.format(\n local_path,\n threading.current_thread().ident,\n ),\n )\n\n return local_path\n # we download into temp_local_path so that if we accidentally stop in the middle,\n # we won't think we have the entire file\n temp_local_path = '{}_{}{}'.format(local_path, time(), self._temp_download_suffix)\n obj = self._get_object(remote_path)\n if not obj:\n return None\n\n # object size in bytes\n total_size_mb = -1\n dl_total_mb = 0.\n download_reported = False\n # chunks size is ignored and always 5Mb\n chunk_size_mb = 5\n\n # make sure we have the destination folder\n # noinspection PyBroadException\n Path(temp_local_path).parent.mkdir(parents=True, exist_ok=True)\n\n # try to get file size\n try:\n if isinstance(self._driver, _HttpDriver) and 
obj:\n obj = self._driver._get_download_object(obj)\n total_size_mb = float(obj.headers.get('Content-Length', 0)) / (1024 * 1024)\n elif hasattr(obj, 'size'):\n size = obj.size\n # Google storage has the option to reload the object to get the size\n if size is None and hasattr(obj, 'reload'):\n obj.reload()\n size = obj.size\n\n total_size_mb = 0 if size is None else float(size) / (1024 * 1024)\n elif hasattr(obj, 'content_length'):\n total_size_mb = float(obj.content_length) / (1024 * 1024)\n except (ValueError, AttributeError, KeyError):\n pass\n\n # if driver supports download with callback, use it (it might be faster)\n if hasattr(self._driver, 'download_object'):\n # callback\n cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self._log)\n self._driver.download_object(obj, temp_local_path, callback=cb)\n download_reported = bool(cb.last_reported)\n dl_total_mb = cb.current_status_mb\n else:\n stream = self._driver.download_object_as_stream(obj, chunk_size_mb * 1024 * 1024)\n if stream is None:\n raise ValueError('Could not download %s' % remote_path)\n with open(temp_local_path, 'wb') as fd:\n data, stream = next_chunk(stream)\n while data:\n fd.write(data)\n data, stream = next_chunk(stream)\n\n if Path(temp_local_path).stat().st_size <= 0:\n raise Exception('downloaded a 0-sized file')\n\n # if we are on windows, we need to remove the target file before renaming\n # otherwise posix rename will overwrite the target\n if os.name != 'posix':\n try:\n os.remove(local_path)\n except Exception:\n pass\n\n # rename temp file to local_file\n # noinspection PyBroadException\n try:\n os.rename(temp_local_path, local_path)\n except Exception:\n # noinspection PyBroadException\n try:\n os.unlink(temp_local_path)\n except Exception:\n pass\n # file was downloaded by a parallel process, check we have the final output and delete the partial copy\n path_local_path = Path(local_path)\n if not path_local_path.is_file() or path_local_path.stat().st_size <= 0:\n raise Exception('Failed renaming partial file, downloaded file exists and a 0-sized file')\n\n # report download if we are on the second chunk\n if verbose or download_reported:\n self._log.info(\n 'Downloaded %.2f MB successfully from %s , saved to %s' % (dl_total_mb, remote_path, local_path))\n return local_path\n except DownloadError:\n raise\n except Exception as e:\n self._log.error(\"Could not download {} , err: {} \".format(remote_path, e))\n if delete_on_failure:\n # noinspection PyBroadException\n try:\n if temp_local_path:\n os.remove(temp_local_path)\n except Exception:\n pass\n return None\n\n def download_as_stream(self, remote_path, chunk_size=None):\n remote_path = self._canonize_url(remote_path)\n try:\n obj = self._get_object(remote_path)\n return self._driver.download_object_as_stream(\n obj, chunk_size=chunk_size, verbose=self._verbose, log=self.log\n )\n except DownloadError:\n raise\n except Exception as e:\n self._log.error(\"Could not download file : %s, err:%s \" % (remote_path, str(e)))\n return None\n\n def download_as_nparray(self, remote_path, chunk_size=None):\n try:\n stream = self.download_as_stream(remote_path, chunk_size)\n if stream is None:\n return\n\n # TODO: ugly py3 hack, please remove ASAP\n if six.PY3 and not isinstance(stream, GeneratorType):\n import numpy as np\n return np.frombuffer(stream, dtype=np.uint8)\n else:\n import numpy as np\n return np.asarray(bytearray(b''.join(stream)), dtype=np.uint8)\n\n except Exception as e:\n self._log.error(\"Could not download file : %s, err:%s \" % 
(remote_path, str(e)))\n\n def delete(self, path):\n return self._driver.delete_object(self._get_object(path))\n\n def check_write_permissions(self, dest_path=None):\n # create a temporary file, then delete it\n base_url = dest_path or self._base_url\n dest_path = base_url + '/.clearml.test'\n # do not check http/s connection permissions\n if dest_path.startswith('http'):\n return True\n try:\n self.upload_from_stream(stream=six.BytesIO(b'clearml'), dest_path=dest_path)\n self.delete(path=dest_path)\n except Exception:\n raise ValueError('Insufficient permissions for {}'.format(base_url))\n return True\n\n @classmethod\n def download_from_url(cls, remote_url, local_path, overwrite_existing=False):\n \"\"\"\n Download a file from remote URL to a local storage\n\n :param remote_url: Remote URL. Example: https://example.com/image.jpg or s3://bucket/folder/file.mp4 etc.\n :param local_path: target location for downloaded file. Example: /tmp/image.jpg\n :param overwrite_existing: If True and local_path exists, it will overwrite it, otherwise print warning\n :return: local_path if download was successful.\n \"\"\"\n helper = cls.get(remote_url)\n if not helper:\n return None\n return helper.download_to_file(remote_url, local_path, overwrite_existing=overwrite_existing)\n\n @classmethod\n def _canonize_url(cls, url):\n return cls._apply_url_substitutions(url)\n\n @classmethod\n def _apply_url_substitutions(cls, url):\n def replace_separator(_url, where, sep):\n return _url[:where] + _url[where:].replace(sep, os.sep)\n\n for index, rule in enumerate(cls._path_substitutions):\n if url.startswith(rule.registered_prefix):\n url = url.replace(\n rule.registered_prefix,\n rule.local_prefix,\n 1, # count. str.replace() does not support keyword arguments\n )\n\n if rule.replace_windows_sep:\n url = replace_separator(url, len(rule.local_prefix), '\\\\')\n\n if rule.replace_linux_sep:\n url = replace_separator(url, len(rule.local_prefix), '/')\n\n break\n\n return url\n\n @classmethod\n def _resolve_base_url(cls, base_url):\n parsed = urlparse(base_url)\n if parsed.scheme == _Boto3Driver.scheme:\n conf = cls._s3_configurations.get_config_by_uri(base_url)\n bucket = conf.bucket\n if not bucket:\n parts = Path(parsed.path.strip('/')).parts\n if parts:\n bucket = parts[0]\n return '/'.join(x for x in ('s3:/', conf.host, bucket) if x)\n elif parsed.scheme == _AzureBlobServiceStorageDriver.scheme:\n conf = cls._azure_configurations.get_config_by_uri(base_url)\n if not conf:\n raise StorageError(\"Can't find azure configuration for {}\".format(base_url))\n return str(furl(base_url).set(path=conf.container_name))\n elif parsed.scheme == _GoogleCloudStorageDriver.scheme:\n conf = cls._gs_configurations.get_config_by_uri(base_url)\n return str(furl(scheme=parsed.scheme, netloc=conf.bucket))\n elif parsed.scheme == 'http':\n return 'http://'\n elif parsed.scheme == 'https':\n return 'https://'\n else: # if parsed.scheme == 'file':\n # if we do not know what it is, we assume file\n return 'file://'\n\n @classmethod\n def conform_url(cls, folder_uri, base_url=None):\n if not folder_uri:\n return folder_uri\n _base_url = cls._resolve_base_url(folder_uri) if not base_url else base_url\n\n if not folder_uri.startswith(_base_url):\n prev_folder_uri = folder_uri\n if _base_url == 'file://':\n folder_uri = str(Path(folder_uri).absolute())\n if folder_uri.startswith('/'):\n folder_uri = _base_url + folder_uri\n else:\n folder_uri = '/'.join((_base_url, folder_uri))\n\n cls._get_logger().debug('Upload destination {} amended 
to {} for registration purposes'.format(\n prev_folder_uri, folder_uri))\n else:\n raise ValueError('folder_uri: {} does not start with base url: {}'.format(folder_uri, _base_url))\n\n return folder_uri\n\n def _absolute_object_name(self, path):\n \"\"\" Returns absolute remote path, including any prefix that is handled by the container \"\"\"\n if not path.startswith(self.base_url):\n return self.base_url.rstrip('/') + '///' + path.lstrip('/')\n return path\n\n def _normalize_object_name(self, path):\n \"\"\" Normalize remote path. Remove any prefix that is already handled by the container \"\"\"\n if path.startswith(self.base_url):\n path = path[len(self.base_url):]\n if path.startswith('/') and os.name == 'nt':\n path = path[1:]\n if self.scheme in (_Boto3Driver.scheme, _GoogleCloudStorageDriver.scheme,\n _AzureBlobServiceStorageDriver.scheme):\n path = path.lstrip('/')\n return path\n\n def _do_async_upload(self, data):\n assert isinstance(data, self._UploadData)\n return self._do_upload(data.src_path, data.dest_path, extra=data.extra, cb=data.callback,\n verbose=True, retries=data.retries)\n\n def _upload_from_file(self, local_path, dest_path, extra=None):\n if not hasattr(self._driver, 'upload_object'):\n with open(local_path, 'rb') as stream:\n res = self.upload_from_stream(stream=stream, dest_path=dest_path, extra=extra)\n else:\n object_name = self._normalize_object_name(dest_path)\n extra = extra.copy() if extra else {}\n extra.update(self._extra)\n cb = UploadProgressReport.from_file(local_path, self._verbose, self._log)\n res = self._driver.upload_object(\n file_path=local_path,\n container=self._container,\n object_name=object_name,\n callback=cb,\n extra=extra)\n return res\n\n def _do_upload(self, src_path, dest_path, extra=None, cb=None, verbose=False, retries=1):\n object_name = self._normalize_object_name(dest_path)\n if cb:\n try:\n cb(None)\n except Exception as e:\n self._log.error(\"Calling upload callback when starting upload: %s\" % str(e))\n if verbose:\n msg = 'Starting upload: {} => {}{}'.format(\n src_path,\n (self._container.name if self._container.name.endswith('/') else self._container.name + '/')\n if self._container and self._container.name else '', object_name)\n if object_name.startswith('file://') or object_name.startswith('/'):\n self._log.debug(msg)\n else:\n self._log.info(msg)\n last_ex = None\n for i in range(max(1, retries)):\n try:\n if not self._upload_from_file(local_path=src_path, dest_path=dest_path, extra=extra):\n # retry if failed\n last_ex = ValueError(\"Upload failed\")\n continue\n last_ex = None\n break\n except Exception as e:\n last_ex = e\n\n if last_ex:\n self._log.error(\"Exception encountered while uploading %s\" % str(last_ex))\n if cb:\n try:\n cb(False)\n except Exception as e:\n self._log.warning(\"Exception on upload callback: %s\" % str(e))\n raise last_ex\n\n if verbose:\n self._log.debug(\"Finished upload: %s => %s\" % (src_path, object_name))\n if cb:\n try:\n cb(dest_path)\n except Exception as e:\n self._log.warning(\"Exception on upload callback: %s\" % str(e))\n\n return dest_path\n\n def _get_object(self, path):\n object_name = self._normalize_object_name(path)\n try:\n return self._driver.get_object(\n container_name=self._container.name if self._container else '', object_name=object_name)\n except ConnectionError:\n raise DownloadError\n except Exception as e:\n self.log.warning('Storage helper problem for {}: {}'.format(str(object_name), str(e)))\n return None\n\n @staticmethod\n def _initialize_upload_pool():\n 
if not StorageHelper._upload_pool:\n StorageHelper._upload_pool = ThreadPool(processes=1)\n\n @staticmethod\n def close_async_threads():\n if StorageHelper._upload_pool:\n pool = StorageHelper._upload_pool\n StorageHelper._upload_pool = None\n # noinspection PyBroadException\n try:\n pool.terminate()\n pool.join()\n except Exception:\n pass\n\n\nclass _HttpDriver(_Driver):\n \"\"\" LibCloud http/https adapter (simple, enough for now) \"\"\"\n\n timeout = (5.0, 30.)\n min_kbps_speed = 50\n\n schemes = ('http', 'https')\n\n class _Container(object):\n _default_backend_session = None\n _default_files_server_host = None\n\n def __init__(self, name, retries=5, **kwargs):\n self.name = name\n self.session = get_http_session_with_retry(total=retries, connect=retries, read=retries, redirect=retries)\n\n def get_headers(self, url):\n if not self._default_backend_session:\n from ..backend_interface.base import InterfaceBase\n self._default_backend_session = InterfaceBase._get_default_session()\n if self._default_files_server_host is None:\n self._default_files_server_host = self._default_backend_session.get_files_server_host().rstrip('/')\n\n if url == self._default_files_server_host or url.startswith(self._default_files_server_host + '/'):\n return self._default_backend_session.add_auth_headers({})\n return None\n\n class _HttpSessionHandle(object):\n def __init__(self, url, is_stream, container_name, object_name):\n self.url, self.is_stream, self.container_name, self.object_name = \\\n url, is_stream, container_name, object_name\n\n def __init__(self, retries=5):\n self._retries = retries\n self._containers = {}\n\n def get_container(self, container_name, config=None, **kwargs):\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, retries=self._retries, **kwargs)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, extra=None, callback=None, **kwargs):\n url = object_name[:object_name.index('/')]\n url_path = object_name[len(url) + 1:]\n full_url = container.name + url\n # when sending data in post, there is no connection timeout, just an entire upload timeout\n timeout = self.timeout[-1]\n stream_size = 0\n if hasattr(iterator, 'tell') and hasattr(iterator, 'seek'):\n pos = iterator.tell()\n iterator.seek(0, 2)\n stream_size = iterator.tell() - pos\n iterator.seek(pos, 0)\n timeout = max(timeout, (stream_size / 1024) / float(self.min_kbps_speed))\n\n res = container.session.post(full_url, files={url_path: iterator}, timeout=timeout,\n headers=container.get_headers(full_url))\n if res.status_code != requests.codes.ok:\n raise ValueError('Failed uploading object %s (%d): %s' % (object_name, res.status_code, res.text))\n\n # call back is useless because we are not calling it while uploading...\n\n # if callback and stream_size:\n # try:\n # callback(stream_size)\n # except Exception as ex:\n # log.debug('Exception raised when running callback function: %s' % ex)\n return res\n\n def list_container_objects(self, *args, **kwargs):\n raise NotImplementedError('List is not implemented for http protocol')\n\n def delete_object(self, obj, *args, **kwargs):\n assert isinstance(obj, self._HttpSessionHandle)\n container = self._containers[obj.container_name]\n res = container.session.delete(obj.url, headers=container.get_headers(obj.url))\n if res.status_code != requests.codes.ok:\n self._get_logger().warning('Failed deleting object %s (%d): %s' % (\n obj.object_name, 
res.status_code, res.text))\n return False\n return True\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n is_stream = kwargs.get('stream', True)\n url = ''.join((container_name, object_name.lstrip('/')))\n return self._HttpSessionHandle(url, is_stream, container_name, object_name)\n\n def _get_download_object(self, obj):\n # bypass for session result\n if not isinstance(obj, self._HttpSessionHandle):\n return obj\n\n container = self._containers[obj.container_name]\n # set stream flag before we send the request\n container.session.stream = obj.is_stream\n res = container.session.get(obj.url, timeout=self.timeout, headers=container.get_headers(obj.url))\n if res.status_code != requests.codes.ok:\n raise ValueError('Failed getting object %s (%d): %s' % (obj.object_name, res.status_code, res.text))\n return res\n\n def download_object_as_stream(self, obj, chunk_size=64 * 1024, **_):\n # return iterable object\n obj = self._get_download_object(obj)\n return obj.iter_content(chunk_size=chunk_size)\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n obj = self._get_download_object(obj)\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n length = 0\n with p.open(mode='wb') as f:\n for chunk in obj.iter_content(chunk_size=5 * 1024 * 1024):\n # filter out keep-alive new chunks\n if not chunk:\n continue\n chunk_size = len(chunk)\n f.write(chunk)\n length += chunk_size\n if callback:\n callback(chunk_size)\n\n return length\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n def test_upload(self, test_path, config, **kwargs):\n return True\n\n def upload_object(self, file_path, container, object_name, extra, callback=None, **kwargs):\n with open(file_path, 'rb') as stream:\n return self.upload_object_via_stream(iterator=stream, container=container,\n object_name=object_name, extra=extra, callback=callback, **kwargs)\n\n\nclass _Stream(object):\n encoding = None\n mode = 'rw'\n name = ''\n newlines = '\\n'\n softspace = False\n\n def __init__(self, input_iterator=None):\n self.closed = False\n self._buffer = Queue()\n self._input_iterator = input_iterator\n self._leftover = None\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return self.next()\n\n def close(self):\n self.closed = True\n\n def flush(self):\n pass\n\n def fileno(self):\n return 87\n\n def isatty(self):\n return False\n\n def next(self):\n while not self.closed or not self._buffer.empty():\n # input stream\n if self._input_iterator:\n try:\n chunck = next(self._input_iterator)\n return chunck\n except StopIteration:\n self.closed = True\n raise StopIteration()\n except Exception as ex:\n _Driver.get_logger().error('Failed downloading: %s' % ex)\n else:\n # in/out stream\n try:\n return self._buffer.get(block=True, timeout=1.)\n except Empty:\n pass\n\n raise StopIteration()\n\n def read(self, size=None):\n try:\n data = self.next() if self._leftover is None else self._leftover\n except StopIteration:\n return six.b('')\n\n self._leftover = None\n try:\n while size is None or not data or len(data) < size:\n chunk = self.next()\n if chunk is not None:\n if data is not None:\n data += chunk\n else:\n data = chunk\n except StopIteration:\n pass\n\n if size is not None and data and len(data) > size:\n self._leftover = data[size:]\n return data[:size]\n\n return data\n\n def 
readline(self, size=None):\n return self.read(size)\n\n def readlines(self, sizehint=None):\n pass\n\n def truncate(self, size=None):\n pass\n\n def write(self, bytes):\n self._buffer.put(bytes, block=True)\n\n def writelines(self, sequence):\n for s in sequence:\n self.write(s)\n\n\nclass _Boto3Driver(_Driver):\n \"\"\" Boto3 storage adapter (simple, enough for now) \"\"\"\n\n _min_pool_connections = 512\n _max_multipart_concurrency = deferred_config('aws.boto3.max_multipart_concurrency', 16)\n _pool_connections = deferred_config('aws.boto3.pool_connections', 512)\n\n _stream_download_pool_connections = 128\n _stream_download_pool = None\n\n _containers = {}\n\n scheme = 's3'\n scheme_prefix = str(furl(scheme=scheme, netloc=''))\n\n _bucket_location_failure_reported = set()\n\n class _Container(object):\n _creation_lock = threading.Lock()\n\n def __init__(self, name, cfg):\n try:\n import boto3\n import botocore.client\n from botocore.exceptions import ClientError # noqa: F401\n except ImportError:\n raise UsageError(\n 'AWS S3 storage driver (boto3) not found. '\n 'Please install driver using: pip install \\\"boto3>=1.9\\\"'\n )\n\n # skip 's3://'\n self.name = name[5:]\n endpoint = (('https://' if cfg.secure else 'http://') + cfg.host) if cfg.host else None\n\n # boto3 client creation isn't thread-safe (client itself is)\n with self._creation_lock:\n boto_kwargs = {\n \"endpoint_url\": endpoint,\n \"use_ssl\": cfg.secure,\n \"verify\": cfg.verify,\n \"config\": botocore.client.Config(\n max_pool_connections=max(\n _Boto3Driver._min_pool_connections,\n _Boto3Driver._pool_connections)\n )\n }\n if not cfg.use_credentials_chain:\n boto_kwargs[\"aws_access_key_id\"] = cfg.key\n boto_kwargs[\"aws_secret_access_key\"] = cfg.secret\n\n self.resource = boto3.resource(\n 's3',\n **boto_kwargs\n )\n\n self.config = cfg\n bucket_name = self.name[len(cfg.host) + 1:] if cfg.host else self.name\n self.bucket = self.resource.Bucket(bucket_name)\n\n @attrs\n class ListResult(object):\n name = attrib(default=None)\n\n def __init__(self):\n pass\n\n def _get_stream_download_pool(self):\n if self._stream_download_pool is None:\n self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections)\n return self._stream_download_pool\n\n def get_container(self, container_name, config=None, **kwargs):\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, cfg=config)\n self._containers[container_name].config.retries = kwargs.get('retries', 5)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):\n import boto3.s3.transfer\n stream = _Stream(iterator)\n try:\n container.bucket.upload_fileobj(stream, object_name, Config=boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n num_download_attempts=container.config.retries),\n Callback=callback,\n )\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):\n import boto3.s3.transfer\n try:\n container.bucket.upload_file(file_path, object_name, Config=boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n 
num_download_attempts=container.config.retries),\n Callback=callback)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def list_container_objects(self, container, ex_prefix=None, **kwargs):\n if ex_prefix:\n res = container.bucket.objects.filter(Prefix=ex_prefix)\n else:\n res = container.bucket.objects.all()\n for res in res:\n yield self.ListResult(name=res.key)\n\n def delete_object(self, object, **kwargs):\n from botocore.exceptions import ClientError\n object.delete()\n try:\n # Try loading the file to verify deletion\n object.load()\n return False\n except ClientError as e:\n return int(e.response['Error']['Code']) == 404\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n full_container_name = 's3://' + container_name\n container = self._containers[full_container_name]\n obj = container.resource.Object(container.bucket.name, object_name)\n obj.container_name = full_container_name\n return obj\n\n def download_object_as_stream(self, obj, chunk_size=64 * 1024, verbose=None, log=None, **_):\n def async_download(a_obj, a_stream, cb, cfg):\n try:\n a_obj.download_fileobj(a_stream, Callback=cb, Config=cfg)\n except Exception as ex:\n (log or self.get_logger()).error('Failed downloading: %s' % ex)\n a_stream.close()\n\n import boto3.s3.transfer\n # return iterable object\n stream = _Stream()\n container = self._containers[obj.container_name]\n config = boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n num_download_attempts=container.config.retries)\n total_size_mb = obj.content_length / (1024. * 1024.)\n remote_path = os.path.join(obj.container_name, obj.key)\n cb = DownloadProgressReport(total_size_mb, verbose, remote_path, log)\n self._get_stream_download_pool().submit(async_download, obj, stream, cb, config)\n\n return stream\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n import boto3.s3.transfer\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n container = self._containers[obj.container_name]\n obj.download_file(str(p),\n Callback=callback,\n Config=boto3.s3.transfer.TransferConfig(\n use_threads=container.config.multipart,\n max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,\n num_download_attempts=container.config.retries))\n\n @classmethod\n def _test_bucket_config(cls, conf, log, test_path='', raise_on_error=True, log_on_error=True):\n try:\n import boto3\n from botocore.exceptions import ClientError\n except ImportError:\n return False\n\n if not conf.bucket:\n return False\n try:\n if not conf.is_valid():\n raise Exception('Missing credentials')\n\n fullname = furl(conf.bucket).add(path=test_path).add(path='%s-upload_test' % cls.__module__)\n bucket_name = str(fullname.path.segments[0])\n filename = str(furl(path=fullname.path.segments[1:]))\n\n data = {\n 'user': getpass.getuser(),\n 'machine': gethostname(),\n 'time': datetime.utcnow().isoformat()\n }\n\n boto_session = boto3.Session(conf.key, conf.secret)\n boto_resource = boto_session.resource('s3', conf.region)\n bucket = boto_resource.Bucket(bucket_name)\n bucket.put_object(Key=filename, Body=six.b(json.dumps(data)))\n\n region = cls._get_bucket_region(conf=conf, log=log, 
report_info=True)\n\n if region and ((conf.region and region != conf.region) or (not conf.region and region != 'us-east-1')):\n msg = \"incorrect region specified for bucket %s (detected region %s)\" % (conf.bucket, region)\n else:\n return True\n\n except ClientError as ex:\n msg = ex.response['Error']['Message']\n if log_on_error and log:\n log.error(msg)\n\n if raise_on_error:\n raise\n\n except Exception as ex:\n msg = str(ex)\n if log_on_error and log:\n log.error(msg)\n\n if raise_on_error:\n raise\n\n msg = (\"Failed testing access to bucket %s: \" % conf.bucket) + msg\n\n if log_on_error and log:\n log.error(msg)\n\n if raise_on_error:\n raise StorageError(msg)\n\n return False\n\n @classmethod\n def _get_bucket_region(cls, conf, log=None, report_info=False):\n import boto3\n from botocore.exceptions import ClientError\n\n if not conf.bucket:\n return None\n\n def report(msg):\n if log and conf.get_bucket_host() not in cls._bucket_location_failure_reported:\n if report_info:\n log.debug(msg)\n else:\n log.warning(msg)\n cls._bucket_location_failure_reported.add(conf.get_bucket_host())\n\n try:\n boto_session = boto3.Session(conf.key, conf.secret)\n boto_resource = boto_session.resource('s3')\n return boto_resource.meta.client.get_bucket_location(Bucket=conf.bucket)[\"LocationConstraint\"]\n\n except ClientError as ex:\n report(\"Failed getting bucket location (region) for bucket \"\n \"%s: %s (%s, access_key=%s). Default region will be used. \"\n \"This is normal if you do not have GET_BUCKET_LOCATION permission\"\n % (conf.bucket, ex.response['Error']['Message'], ex.response['Error']['Code'], conf.key))\n except Exception as ex:\n report(\"Failed getting bucket location (region) for bucket %s: %s. Default region will be used.\"\n % (conf.bucket, str(ex)))\n\n return None\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n def test_upload(self, test_path, config, **_):\n return True\n\n\nclass _GoogleCloudStorageDriver(_Driver):\n \"\"\"Storage driver for google cloud storage\"\"\"\n\n _stream_download_pool_connections = 128\n _stream_download_pool = None\n\n _containers = {}\n\n scheme = 'gs'\n scheme_prefix = str(furl(scheme=scheme, netloc=''))\n\n class _Container(object):\n def __init__(self, name, cfg):\n try:\n from google.cloud import storage\n from google.oauth2 import service_account\n except ImportError:\n raise UsageError(\n 'Google cloud driver not found. 
'\n 'Please install driver using: pip install \\\"google-cloud-storage>=1.13.2\\\"'\n )\n\n self.name = name[len(_GoogleCloudStorageDriver.scheme_prefix):]\n\n if cfg.credentials_json:\n credentials = service_account.Credentials.from_service_account_file(cfg.credentials_json)\n else:\n credentials = None\n\n self.client = storage.Client(project=cfg.project, credentials=credentials)\n for adapter in self.client._http.adapters.values():\n if cfg.pool_connections:\n adapter._pool_connections = cfg.pool_connections\n if cfg.pool_maxsize:\n adapter._pool_maxsize = cfg.pool_maxsize\n\n self.config = cfg\n self.bucket = self.client.bucket(self.name)\n\n def _get_stream_download_pool(self):\n if self._stream_download_pool is None:\n self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections)\n return self._stream_download_pool\n\n def get_container(self, container_name, config=None, **kwargs):\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, cfg=config)\n self._containers[container_name].config.retries = kwargs.get('retries', 5)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):\n try:\n blob = container.bucket.blob(object_name)\n blob.upload_from_file(iterator)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def upload_object(self, file_path, container, object_name, extra=None, **kwargs):\n try:\n blob = container.bucket.blob(object_name)\n blob.upload_from_filename(file_path)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n return True\n\n def list_container_objects(self, container, **kwargs):\n return list(container.bucket.list_blobs())\n\n def delete_object(self, object, **kwargs):\n try:\n object.delete()\n except Exception as ex:\n try:\n from google.cloud.exceptions import NotFound\n if isinstance(ex, NotFound):\n return False\n except ImportError:\n pass\n name = getattr(object, \"name\", \"\")\n self.get_logger().warning(\"Failed deleting object {}: {}\".format(name, ex))\n return False\n\n return not object.exists()\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n full_container_name = str(furl(scheme=self.scheme, netloc=container_name))\n container = self._containers[full_container_name]\n obj = container.bucket.blob(object_name)\n obj.container_name = full_container_name\n return obj\n\n def download_object_as_stream(self, obj, chunk_size=256 * 1024, **_):\n raise NotImplementedError('Unsupported for google storage')\n\n def async_download(a_obj, a_stream):\n try:\n a_obj.download_to_file(a_stream)\n except Exception as ex:\n self.get_logger().error('Failed downloading: %s' % ex)\n a_stream.close()\n\n # return iterable object\n stream = _Stream()\n obj.chunk_size = chunk_size\n self._get_stream_download_pool().submit(async_download, obj, stream)\n\n return stream\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n obj.download_to_filename(str(p))\n\n def test_upload(self, test_path, config, **_):\n bucket_url = str(furl(scheme=self.scheme, netloc=config.bucket, path=config.subdir))\n bucket = 
self.get_container(container_name=bucket_url, config=config).bucket\n\n test_obj = bucket\n\n if test_path:\n if not test_path.endswith('/'):\n test_path += '/'\n\n blob = bucket.blob(test_path)\n\n if blob.exists():\n test_obj = blob\n\n permissions_to_test = ('storage.objects.get', 'storage.objects.update')\n return set(test_obj.test_iam_permissions(permissions_to_test)) == set(permissions_to_test)\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n\nclass _AzureBlobServiceStorageDriver(_Driver):\n scheme = 'azure'\n\n _containers = {}\n\n class _Container(object):\n def __init__(self, name, config):\n try:\n from azure.common import AzureHttpError # noqa: F401\n from azure.storage.blob import BlockBlobService\n except ImportError:\n raise UsageError(\n 'Azure blob storage driver not found. '\n 'Please install driver using: pip install \\\"azure.storage.blob<=2.1.0\\\"'\n )\n\n self.name = name\n self.config = config\n self.blob_service = BlockBlobService(\n account_name=config.account_name,\n account_key=config.account_key,\n )\n\n @attrs\n class _Object(object):\n container = attrib()\n blob_name = attrib()\n content_length = attrib()\n\n def get_container(self, container_name=None, config=None, **kwargs):\n container_name = container_name or config.container_name\n if container_name not in self._containers:\n self._containers[container_name] = self._Container(name=container_name, config=config)\n # self._containers[container_name].config.retries = kwargs.get('retries', 5)\n return self._containers[container_name]\n\n def upload_object_via_stream(self, iterator, container, object_name, callback=None, extra=None, **kwargs):\n from azure.common import AzureHttpError # noqa\n\n blob_name = self._blob_name_from_object_path(object_name, container.name) # noqa: F841\n try:\n container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024\n container.blob_service.socket_timeout = (300, 2000)\n container.blob_service.create_blob_from_bytes(\n container.name,\n object_name,\n iterator.read() if hasattr(iterator, \"read\") else bytes(iterator),\n # timeout=300,\n max_connections=2,\n progress_callback=callback,\n )\n return True\n except AzureHttpError as ex:\n self.get_logger().error('Failed uploading (Azure error): %s' % ex)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n return False\n\n def upload_object(self, file_path, container, object_name, callback=None, extra=None, **kwargs):\n from azure.common import AzureHttpError # noqa\n\n blob_name = self._blob_name_from_object_path(object_name, container.name)\n stream = None\n try:\n from azure.storage.blob import ContentSettings # noqa\n from mimetypes import guess_type\n container.blob_service.MAX_SINGLE_PUT_SIZE = 16 * 1024 * 1024\n container.blob_service.socket_timeout = (300, 2000)\n container.blob_service.create_blob_from_path(\n container.name,\n blob_name,\n file_path,\n # timeout=300,\n max_connections=2,\n content_settings=ContentSettings(content_type=guess_type(file_path)),\n progress_callback=callback,\n )\n return True\n except AzureHttpError as ex:\n self.get_logger().error('Failed uploading (Azure error): %s' % ex)\n except Exception as ex:\n self.get_logger().error('Failed uploading: %s' % ex)\n finally:\n if stream:\n stream.close()\n\n def list_container_objects(self, container, ex_prefix=None, **kwargs):\n return list(container.blob_service.list_blobs(container_name=container.name, prefix=ex_prefix))\n\n def delete_object(self, object, **kwargs):\n container = 
object.container\n container.blob_service.delete_blob(\n container.name,\n object.blob_name,\n )\n return not object.container.blob_service.exists(container.name, object.blob_name)\n\n def get_object(self, container_name, object_name, *args, **kwargs):\n container = self._containers.get(container_name)\n if not container:\n raise StorageError(\"Container `{}` not found for object {}\".format(container_name, object_name))\n\n # blob_name = self._blob_name_from_object_path(object_name, container_name)\n blob = container.blob_service.get_blob_properties(container.name, object_name)\n\n return self._Object(container=container, blob_name=blob.name, content_length=blob.properties.content_length)\n\n def download_object_as_stream(self, obj, verbose, *_, **__):\n container = obj.container\n total_size_mb = obj.content_length / (1024. * 1024.)\n remote_path = os.path.join(\n \"{}://\".format(self.scheme),\n container.config.account_name,\n container.name,\n obj.blob_name\n )\n cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self.get_logger())\n blob = container.blob_service.get_blob_to_bytes(\n container.name,\n obj.blob_name,\n progress_callback=cb,\n )\n return blob.content\n\n def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None, **_):\n p = Path(local_path)\n if not overwrite_existing and p.is_file():\n self.get_logger().warning('failed saving after download: overwrite=False and file exists (%s)' % str(p))\n return\n\n download_done = threading.Event()\n download_done.counter = 0\n\n def callback_func(current, total):\n if callback:\n chunk = current - download_done.counter\n download_done.counter += chunk\n callback(chunk)\n if current >= total:\n download_done.set()\n\n container = obj.container\n container.blob_service.MAX_SINGLE_GET_SIZE = 5 * 1024 * 1024\n _ = container.blob_service.get_blob_to_path(\n container.name,\n obj.blob_name,\n local_path,\n max_connections=10,\n progress_callback=callback_func,\n )\n download_done.wait()\n\n def test_upload(self, test_path, config, **_):\n container = self.get_container(config=config)\n try:\n container.blob_service.get_container_properties(container.name)\n except Exception:\n return False\n else:\n # Using the account Key, we can always upload...\n return True\n\n @classmethod\n def _blob_name_from_object_path(cls, name, container_name):\n scheme = urlparse(name).scheme\n if scheme:\n if scheme != cls.scheme:\n raise StorageError(\n \"When using a URL, only the `{}` scheme is supported for Azure storage: {}\",\n cls.scheme,\n name,\n )\n\n f = furl(name)\n\n if not f.path.segments:\n raise StorageError(\n \"Missing container name in URL {}\",\n name,\n )\n\n parsed_container_name = f.path.segments[0]\n\n if parsed_container_name != container_name:\n raise StorageError(\n \"Container name mismatch (expected {}, found {}) in {}\",\n container_name,\n parsed_container_name,\n name,\n )\n\n if len(f.path.segments) == 1:\n raise StorageError(\n \"No path found following container name {} in {}\",\n container_name,\n name,\n )\n\n return f.path.segments[0], os.path.join(*f.path.segments[1:])\n\n return name\n\n def get_direct_access(self, remote_path, **_):\n return None\n\n\nclass _FileStorageDriver(_Driver):\n \"\"\"\n A base StorageDriver to derive from.\n \"\"\"\n\n scheme = \"file\"\n CHUNK_SIZE = 8096\n IGNORE_FOLDERS = ['.lock', '.hash']\n Object = namedtuple(\"Object\", ['name', 'size', 'extra', 'driver', 'container', 'hash', 'meta_data'])\n\n class _Container(object):\n 
def __init__(self, name, extra, driver):\n self.name = name\n self.extra = extra\n self.driver = driver\n\n def __init__(self, key, secret=None, secure=True, host=None, port=None,\n **kwargs):\n\n # Use the key as the path to the storage\n self.base_path = key\n\n def _make_path(self, path, ignore_existing=True):\n \"\"\"\n Create a path by checking if it already exists\n \"\"\"\n\n try:\n os.makedirs(path)\n except OSError:\n exp = sys.exc_info()[1]\n if exp.errno == errno.EEXIST and not ignore_existing:\n raise exp\n\n def _check_container_name(self, container_name):\n \"\"\"\n Check if the container name is valid\n\n :param container_name: Container name\n :type container_name: ``str``\n \"\"\"\n\n if '/' in container_name or '\\\\' in container_name:\n raise ValueError(\"Container name \\\"{}\\\" cannot contain \\\\ or / \".format(container_name))\n\n def _make_container(self, container_name):\n \"\"\"\n Create a container instance\n\n :param container_name: Container name.\n :type container_name: ``str``\n\n :return: A Container instance.\n \"\"\"\n container_name = container_name or '.'\n self._check_container_name(container_name)\n\n full_path = os.path.realpath(os.path.join(self.base_path, container_name))\n\n try:\n stat = os.stat(full_path)\n if not os.path.isdir(full_path):\n raise OSError(\"Target path \\\"{}\\\" is not a directory\".format(full_path))\n except OSError:\n raise OSError(\"Target path \\\"{}\\\" is not accessible or does not exist\".format(full_path))\n\n extra = {\n 'creation_time': stat.st_ctime,\n 'access_time': stat.st_atime,\n 'modify_time': stat.st_mtime,\n }\n\n return self._Container(name=container_name, extra=extra, driver=self)\n\n def _make_object(self, container, object_name):\n \"\"\"\n Create an object instance\n\n :param container: Container.\n :type container: :class:`Container`\n\n :param object_name: Object name.\n :type object_name: ``str``\n\n :return: A Object instance.\n \"\"\"\n\n full_path = os.path.realpath(os.path.join(self.base_path, container.name if container else '.', object_name))\n\n if os.path.isdir(full_path):\n raise ValueError(\"Target path \\\"{}\\\" already exist\".format(full_path))\n\n try:\n stat = os.stat(full_path)\n except Exception:\n raise ValueError(\"Cannot access target path \\\"{}\\\"\".format(full_path))\n\n extra = {\n 'creation_time': stat.st_ctime,\n 'access_time': stat.st_atime,\n 'modify_time': stat.st_mtime,\n }\n\n return self.Object(name=object_name, size=stat.st_size, extra=extra,\n driver=self, container=container, hash=None, meta_data=None)\n\n def iterate_containers(self):\n \"\"\"\n Return a generator of containers.\n\n :return: A generator of Container instances.\n \"\"\"\n\n for container_name in os.listdir(self.base_path):\n full_path = os.path.join(self.base_path, container_name)\n if not os.path.isdir(full_path):\n continue\n yield self._make_container(container_name)\n\n def _get_objects(self, container):\n \"\"\"\n Recursively iterate through the file-system and return the object names\n \"\"\"\n\n cpath = self.get_container_cdn_url(container, check=True)\n\n for folder, subfolders, files in os.walk(cpath, topdown=True):\n # Remove unwanted subfolders\n for subf in self.IGNORE_FOLDERS:\n if subf in subfolders:\n subfolders.remove(subf)\n\n for name in files:\n full_path = os.path.join(folder, name)\n object_name = os.path.relpath(full_path, start=cpath)\n yield self._make_object(container, object_name)\n\n def iterate_container_objects(self, container):\n \"\"\"\n Returns a generator of 
objects for the given container.\n\n :param container: Container instance\n :type container: :class:`Container`\n\n :return: A generator of Object instances.\n \"\"\"\n\n return self._get_objects(container)\n\n def get_container(self, container_name, **_):\n \"\"\"\n Return a container instance.\n\n :param container_name: Container name.\n :type container_name: ``str``\n\n :return: A Container instance.\n \"\"\"\n return self._make_container(container_name)\n\n def get_container_cdn_url(self, container, check=False):\n \"\"\"\n Return a container CDN URL.\n\n :param container: Container instance\n :type container: :class:`Container`\n\n :param check: Indicates if the path's existence must be checked\n :type check: ``bool``\n\n :return: A CDN URL for this container.\n \"\"\"\n path = os.path.realpath(os.path.join(self.base_path, container.name if container else '.'))\n\n if check and not os.path.isdir(path):\n raise ValueError(\"Target path \\\"{}\\\" does not exist\".format(path))\n\n return path\n\n def get_object(self, container_name, object_name, **_):\n \"\"\"\n Return an object instance.\n\n :param container_name: Container name.\n :type container_name: ``str``\n\n :param object_name: Object name.\n :type object_name: ``str``\n\n :return: An Object instance.\n \"\"\"\n container = self._make_container(container_name)\n return self._make_object(container, object_name)\n\n def get_object_cdn_url(self, obj):\n \"\"\"\n Return an object CDN URL.\n\n :param obj: Object instance\n :type obj: :class:`Object`\n\n :return: A CDN URL for this object.\n \"\"\"\n return os.path.realpath(os.path.join(self.base_path, obj.container.name, obj.name))\n\n def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True, **_):\n \"\"\"\n Download an object to the specified destination path.\n\n :param obj: Object instance.\n :type obj: :class:`Object`\n\n :param destination_path: Full path to a file or a directory where the\n incoming file will be saved.\n :type destination_path: ``str``\n\n :param overwrite_existing: True to overwrite an existing file,\n defaults to False.\n :type overwrite_existing: ``bool``\n\n :param delete_on_failure: True to delete a partially downloaded file if\n the download was not successful (hash mismatch / file size).\n :type delete_on_failure: ``bool``\n\n :return: True, if an object has been successfully downloaded, False, otherwise.\n \"\"\"\n\n obj_path = self.get_object_cdn_url(obj)\n base_name = os.path.basename(destination_path)\n\n if not base_name and not os.path.exists(destination_path):\n raise ValueError('Path \\\"{}\\\" does not exist'.format(destination_path))\n\n if not base_name:\n file_path = os.path.join(destination_path, obj.name)\n else:\n file_path = destination_path\n\n if os.path.exists(file_path) and not overwrite_existing:\n raise ValueError('File \\\"{}\\\" already exists, but overwrite_existing=False'.format(file_path))\n\n try:\n shutil.copy(obj_path, file_path)\n except IOError:\n if delete_on_failure:\n # noinspection PyBroadException\n try:\n os.unlink(file_path)\n except Exception:\n pass\n return False\n\n return True\n\n def download_object_as_stream(self, obj, chunk_size=None, **_):\n \"\"\"\n Return a generator which yields object data.\n\n :param obj: Object instance\n :type obj: :class:`Object`\n\n :param chunk_size: Optional chunk size (in bytes).\n :type chunk_size: ``int``\n\n :return: A stream of binary chunks of data.\n \"\"\"\n path = self.get_object_cdn_url(obj)\n with open(path, 'rb') as 
obj_file:\n for data in self._read_in_chunks(obj_file, chunk_size=chunk_size):\n yield data\n\n def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, **_):\n \"\"\"\n Upload an object currently located on a disk.\n\n :param file_path: Path to the object on disk.\n :type file_path: ``str``\n\n :param container: Destination container.\n :type container: :class:`Container`\n\n :param object_name: Object name.\n :type object_name: ``str``\n\n :param verify_hash: Verify hast\n :type verify_hash: ``bool``\n\n :param extra: (optional) Extra attributes (driver specific).\n :type extra: ``dict``\n \"\"\"\n\n path = self.get_container_cdn_url(container, check=True)\n obj_path = os.path.join(path, object_name)\n base_path = os.path.dirname(obj_path)\n\n self._make_path(base_path)\n\n shutil.copy(file_path, obj_path)\n\n os.chmod(obj_path, int('664', 8))\n\n return self._make_object(container, object_name)\n\n def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):\n \"\"\"\n Upload an object using an iterator.\n\n If a provider supports it, chunked transfer encoding is used and you\n don't need to know in advance the amount of data to be uploaded.\n\n Otherwise if a provider doesn't support it, iterator will be exhausted\n so a total size for data to be uploaded can be determined.\n\n Note: Exhausting the iterator means that the whole data must be\n buffered in memory which might result in memory exhausting when\n uploading a very large object.\n\n If a file is located on a disk you are advised to use upload_object\n function which uses fs.stat function to determine the file size and it\n doesn't need to buffer whole object in the memory.\n\n :type iterator: ``object``\n :param iterator: An object which implements the iterator\n interface and yields binary chunks of data.\n\n :type container: :class:`Container`\n :param container: Destination container.\n\n :type object_name: ``str``\n :param object_name: Object name.\n\n :type extra: ``dict``\n :param extra: (optional) Extra attributes (driver specific). 
Note:\n This dictionary must contain a 'content_type' key which represents\n a content type of the stored object.\n \"\"\"\n path = self.get_container_cdn_url(container, check=True)\n obj_path = os.path.join(path, object_name)\n base_path = os.path.dirname(obj_path)\n self._make_path(base_path)\n\n obj_path = os.path.realpath(obj_path)\n with open(obj_path, 'wb' if not isinstance(iterator, StringIO) else 'wt') as obj_file:\n obj_file.write(iterator.read() if hasattr(iterator, 'read') else bytes(iterator))\n\n os.chmod(obj_path, int('664', 8))\n return self._make_object(container, object_name)\n\n def delete_object(self, obj, **_):\n \"\"\"\n Delete an object.\n\n :type obj: :class:`Object`\n :param obj: Object instance.\n\n :return: True on success.\n \"\"\"\n\n path = self.get_object_cdn_url(obj)\n\n try:\n os.unlink(path)\n except Exception:\n return False\n\n # # Check and delete all the empty parent folders\n # path = os.path.dirname(path)\n # container_url = obj.container.get_cdn_url()\n #\n # # Delete the empty parent folders till the container's level\n # while path != container_url:\n # try:\n # os.rmdir(path)\n # except OSError:\n # exp = sys.exc_info()[1]\n # if exp.errno == errno.ENOTEMPTY:\n # break\n # raise exp\n #\n # path = os.path.dirname(path)\n\n return True\n\n def create_container(self, container_name):\n \"\"\"\n Create a new container.\n\n :type container_name: ``str``\n :param container_name: Container name.\n\n :return: A Container instance on success.\n \"\"\"\n container_name = container_name or '.'\n self._check_container_name(container_name)\n\n path = os.path.join(self.base_path, container_name)\n\n try:\n self._make_path(path, ignore_existing=False)\n except OSError:\n exp = sys.exc_info()[1]\n if exp.errno == errno.EEXIST:\n raise ValueError('Container \\\"{}\\\" with this name already exists. 
The name '\n 'must be unique among all the containers in the '\n 'system'.format(container_name))\n else:\n raise ValueError('Error creating container \\\"{}\\\"'.format(container_name))\n except Exception:\n raise ValueError('Error creating container \\\"{}\\\"'.format(container_name))\n\n return self._make_container(container_name)\n\n def delete_container(self, container):\n \"\"\"\n Delete a container.\n\n :type container: :class:`Container`\n :param container: Container instance\n\n :return: True on success, False otherwise.\n \"\"\"\n\n # Check if there are any objects inside this\n for obj in self._get_objects(container):\n raise ValueError('Container \\\"{}\\\" is not empty'.format(container.name))\n\n path = self.get_container_cdn_url(container, check=True)\n\n # noinspection PyBroadException\n try:\n shutil.rmtree(path)\n except Exception:\n return False\n\n return True\n\n def list_container_objects(self, container, **kwargs):\n return list(self.iterate_container_objects(container))\n\n @staticmethod\n def _read_in_chunks(iterator, chunk_size=None, fill_size=False, yield_empty=False):\n \"\"\"\n Return a generator which yields data in chunks.\n\n :param iterator: An object which implements an iterator interface\n or a File like object with read method.\n :type iterator: :class:`object` which implements iterator interface.\n\n :param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)\n :type chunk_size: ``int``\n\n :param fill_size: If True, make sure chunks are exactly chunk_size in\n length (except for last chunk).\n :type fill_size: ``bool``\n\n :param yield_empty: If true and iterator returned no data, only yield empty\n bytes object\n :type yield_empty: ``bool``\n\n TODO: At some point in the future we could use byte arrays here if version\n >= Python 3. This should speed things up a bit and reduce memory usage.\n \"\"\"\n chunk_size = chunk_size or _FileStorageDriver.CHUNK_SIZE\n if six.PY3:\n from io import FileIO as file\n\n if isinstance(iterator, (file)):\n get_data = iterator.read\n args = (chunk_size,)\n else:\n get_data = next\n args = (iterator,)\n\n data = bytes('')\n empty = False\n\n while not empty or len(data) > 0:\n if not empty:\n try:\n chunk = bytes(get_data(*args))\n if len(chunk) > 0:\n data += chunk\n else:\n empty = True\n except StopIteration:\n empty = True\n\n if len(data) == 0:\n if empty and yield_empty:\n yield bytes('')\n\n return\n\n if fill_size:\n if empty or len(data) >= chunk_size:\n yield data[:chunk_size]\n data = data[chunk_size:]\n else:\n yield data\n data = bytes('')\n\n def get_direct_access(self, remote_path, **_):\n # this will always make sure we have full path and file:// prefix\n full_url = StorageHelper.conform_url(remote_path)\n # now get rid of the file:// prefix\n path = Path(full_url[7:])\n if not path.exists():\n raise ValueError(\"Requested path does not exist: {}\".format(path))\n return path.as_posix()\n\n def test_upload(self, test_path, config, **kwargs):\n return True\n\n\ndriver_schemes = set(\n filter(\n None,\n itertools.chain(\n (getattr(cls, \"scheme\", None) for cls in _Driver.__subclasses__()),\n *(getattr(cls, \"schemes\", []) for cls in _Driver.__subclasses__())\n )\n )\n)\n\nremote_driver_schemes = driver_schemes - {_FileStorageDriver.scheme}\n" ]
[ [ "numpy.frombuffer" ] ]
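The `_FileStorageDriver` source in the row above buffers streamed data through a `_read_in_chunks` helper with a fixed `CHUNK_SIZE` of 8096 bytes. As a minimal standalone sketch of that chunking idea (not the dataset's own code; `read_in_chunks`, `CHUNK_SIZE` here, and the `BytesIO` demo are illustrative names only), the same behaviour can be written with plain `bytes` objects:

```python
from io import BufferedIOBase
from typing import Iterable, Iterator, Union

CHUNK_SIZE = 8096  # same default size used by the driver code above


def read_in_chunks(source: Union[BufferedIOBase, Iterable[bytes]],
                   chunk_size: int = CHUNK_SIZE) -> Iterator[bytes]:
    """Yield successive byte chunks from a file-like object or an iterable of bytes."""
    if hasattr(source, "read"):
        # File-like object: read fixed-size blocks until EOF.
        while True:
            chunk = source.read(chunk_size)
            if not chunk:
                break
            yield chunk
    else:
        # Generic iterable of byte strings: re-buffer into chunk_size pieces.
        buf = b""
        for piece in source:
            buf += piece
            while len(buf) >= chunk_size:
                yield buf[:chunk_size]
                buf = buf[chunk_size:]
        if buf:
            yield buf


if __name__ == "__main__":
    import io
    data = io.BytesIO(b"x" * 20000)
    sizes = [len(c) for c in read_in_chunks(data, chunk_size=8096)]
    print(sizes)  # [8096, 8096, 3808]
```

The demo at the bottom simply confirms that a 20000-byte buffer comes back as two full 8096-byte chunks plus a 3808-byte remainder.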
bwconrad/solo-learn
[ "ec510d803a4428d7d8803b90fa1484c42cb9cb52" ]
[ "downstream/tinypersons/mmdet/datasets/pipelines/formating.py" ]
[ "from collections.abc import Sequence\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.parallel import DataContainer as DC\n\nfrom ..builder import PIPELINES\n\n\ndef to_tensor(data):\n \"\"\"Convert objects of various python types to :obj:`torch.Tensor`.\n\n Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n :class:`Sequence`, :class:`int` and :class:`float`.\n\n Args:\n data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to\n be converted.\n \"\"\"\n\n if isinstance(data, torch.Tensor):\n return data\n elif isinstance(data, np.ndarray):\n return torch.from_numpy(data)\n elif isinstance(data, Sequence) and not mmcv.is_str(data):\n return torch.tensor(data)\n elif isinstance(data, int):\n return torch.LongTensor([data])\n elif isinstance(data, float):\n return torch.FloatTensor([data])\n else:\n raise TypeError(f'type {type(data)} cannot be converted to tensor.')\n\n\[email protected]_module()\nclass ToTensor:\n \"\"\"Convert some results to :obj:`torch.Tensor` by given keys.\n\n Args:\n keys (Sequence[str]): Keys that need to be converted to Tensor.\n \"\"\"\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n \"\"\"Call function to convert data in results to :obj:`torch.Tensor`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted\n to :obj:`torch.Tensor`.\n \"\"\"\n for key in self.keys:\n results[key] = to_tensor(results[key])\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass ImageToTensor:\n \"\"\"Convert image to :obj:`torch.Tensor` by given keys.\n\n The dimension order of input image is (H, W, C). The pipeline will convert\n it to (C, H, W). 
If only 2 dimension (H, W) is given, the output would be\n (1, H, W).\n\n Args:\n keys (Sequence[str]): Key of images to be converted to Tensor.\n \"\"\"\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n \"\"\"Call function to convert image in results to :obj:`torch.Tensor` and\n transpose the channel order.\n\n Args:\n results (dict): Result dict contains the image data to convert.\n\n Returns:\n dict: The result dict contains the image converted\n to :obj:`torch.Tensor` and transposed to (C, H, W) order.\n \"\"\"\n for key in self.keys:\n img = results[key]\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n results[key] = to_tensor(img.transpose(2, 0, 1))\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass Transpose:\n \"\"\"Transpose some results by given keys.\n\n Args:\n keys (Sequence[str]): Keys of results to be transposed.\n order (Sequence[int]): Order of transpose.\n \"\"\"\n\n def __init__(self, keys, order):\n self.keys = keys\n self.order = order\n\n def __call__(self, results):\n \"\"\"Call function to transpose the channel order of data in results.\n\n Args:\n results (dict): Result dict contains the data to transpose.\n\n Returns:\n dict: The result dict contains the data transposed to \\\n ``self.order``.\n \"\"\"\n for key in self.keys:\n results[key] = results[key].transpose(self.order)\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, order={self.order})'\n\n\[email protected]_module()\nclass ToDataContainer:\n \"\"\"Convert results to :obj:`mmcv.DataContainer` by given fields.\n\n Args:\n fields (Sequence[dict]): Each field is a dict like\n ``dict(key='xxx', **kwargs)``. 
The ``key`` in result will\n be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.\n Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),\n dict(key='gt_labels'))``.\n \"\"\"\n\n def __init__(self,\n fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),\n dict(key='gt_labels'))):\n self.fields = fields\n\n def __call__(self, results):\n \"\"\"Call function to convert data in results to\n :obj:`mmcv.DataContainer`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted to \\\n :obj:`mmcv.DataContainer`.\n \"\"\"\n\n for field in self.fields:\n field = field.copy()\n key = field.pop('key')\n results[key] = DC(results[key], **field)\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(fields={self.fields})'\n\n\[email protected]_module()\nclass DefaultFormatBundle:\n \"\"\"Default formatting bundle.\n\n It simplifies the pipeline of formatting common fields, including \"img\",\n \"proposals\", \"gt_bboxes\", \"gt_labels\", \"gt_masks\" and \"gt_semantic_seg\".\n These fields are formatted as follows.\n\n - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)\n - proposals: (1)to tensor, (2)to DataContainer\n - gt_bboxes: (1)to tensor, (2)to DataContainer\n - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer\n - gt_labels: (1)to tensor, (2)to DataContainer\n - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)\n - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \\\n (3)to DataContainer (stack=True)\n \"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to transform and format common fields in results.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data that is formatted with \\\n default bundle.\n \"\"\"\n\n if 'img' in results:\n img = results['img']\n # add default meta keys\n results = self._add_default_meta_keys(results)\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n img = np.ascontiguousarray(img.transpose(2, 0, 1))\n results['img'] = DC(to_tensor(img), stack=True)\n for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:\n if key not in results:\n continue\n results[key] = DC(to_tensor(results[key]))\n if 'gt_masks' in results:\n results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)\n if 'gt_semantic_seg' in results:\n results['gt_semantic_seg'] = DC(\n to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)\n return results\n\n def _add_default_meta_keys(self, results):\n \"\"\"Add default meta keys.\n\n We set default meta keys including `pad_shape`, `scale_factor` and\n `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and\n `Pad` are implemented during the whole pipeline.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n results (dict): Updated result dict contains the data to convert.\n \"\"\"\n img = results['img']\n results.setdefault('pad_shape', img.shape)\n results.setdefault('scale_factor', 1.0)\n num_channels = 1 if len(img.shape) < 3 else img.shape[2]\n results.setdefault(\n 'img_norm_cfg',\n dict(\n mean=np.zeros(num_channels, dtype=np.float32),\n std=np.ones(num_channels, dtype=np.float32),\n to_rgb=False))\n return results\n\n def __repr__(self):\n return self.__class__.__name__\n\n\[email protected]_module()\nclass Collect:\n \"\"\"Collect data from the loader relevant to the specific task.\n\n This is usually the last stage of the data loader 
pipeline. Typically keys\n is set to some subset of \"img\", \"proposals\", \"gt_bboxes\",\n \"gt_bboxes_ignore\", \"gt_labels\", and/or \"gt_masks\".\n\n The \"img_meta\" item is always populated. The contents of the \"img_meta\"\n dictionary depends on \"meta_keys\". By default this includes:\n\n - \"img_shape\": shape of the image input to the network as a tuple \\\n (h, w, c). Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n\n - \"scale_factor\": a float indicating the preprocessing scale\n\n - \"flip\": a boolean indicating if image flip transform was used\n\n - \"filename\": path to the image file\n\n - \"ori_shape\": original shape of the image as a tuple (h, w, c)\n\n - \"pad_shape\": image shape after padding\n\n - \"img_norm_cfg\": a dict of normalization information:\n\n - mean - per channel mean subtraction\n - std - per channel std divisor\n - to_rgb - bool indicating if bgr was converted to rgb\n\n Args:\n keys (Sequence[str]): Keys of results to be collected in ``data``.\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',\n 'pad_shape', 'scale_factor', 'flip', 'flip_direction',\n 'img_norm_cfg')``\n \"\"\"\n\n def __init__(self,\n keys,\n meta_keys=('filename', 'ori_filename', 'ori_shape',\n 'img_shape', 'pad_shape', 'scale_factor', 'flip',\n 'flip_direction', 'img_norm_cfg')):\n self.keys = keys\n self.meta_keys = meta_keys\n\n def __call__(self, results):\n \"\"\"Call function to collect keys in results. The keys in ``meta_keys``\n will be converted to :obj:mmcv.DataContainer.\n\n Args:\n results (dict): Result dict contains the data to collect.\n\n Returns:\n dict: The result dict contains the following keys\n\n - keys in``self.keys``\n - ``img_metas``\n \"\"\"\n\n data = {}\n img_meta = {}\n for key in self.meta_keys:\n img_meta[key] = results[key]\n data['img_metas'] = DC(img_meta, cpu_only=True)\n for key in self.keys:\n data[key] = results[key]\n return data\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, meta_keys={self.meta_keys})'\n\n\[email protected]_module()\nclass WrapFieldsToLists:\n \"\"\"Wrap fields of the data dictionary into lists for evaluation.\n\n This class can be used as a last step of a test or validation\n pipeline for single image evaluation or inference.\n\n Example:\n >>> test_pipeline = [\n >>> dict(type='LoadImageFromFile'),\n >>> dict(type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n >>> dict(type='Pad', size_divisor=32),\n >>> dict(type='ImageToTensor', keys=['img']),\n >>> dict(type='Collect', keys=['img']),\n >>> dict(type='WrapFieldsToLists')\n >>> ]\n \"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to wrap fields into lists.\n\n Args:\n results (dict): Result dict contains the data to wrap.\n\n Returns:\n dict: The result dict where value of ``self.keys`` are wrapped \\\n into list.\n \"\"\"\n\n # Wrap dict fields into lists\n for key, val in results.items():\n results[key] = [val]\n return results\n\n def __repr__(self):\n return f'{self.__class__.__name__}()'\n" ]
[ [ "numpy.ones", "torch.FloatTensor", "numpy.zeros", "torch.tensor", "torch.from_numpy", "numpy.expand_dims", "torch.LongTensor" ] ]
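The mmdet `formating.py` row above centres on a `to_tensor` helper that normalises ndarrays, sequences, ints and floats into `torch.Tensor`, and an `ImageToTensor` transform that moves images from (H, W, C) to (C, H, W). The following is a small, self-contained usage sketch that restates a simplified version of that helper instead of importing mmdet (mmcv's string check is omitted); it assumes only that `numpy` and `torch` are installed:

```python
import numpy as np
import torch

# Simplified re-statement of the to_tensor conversion rules shown above,
# used here only to illustrate the expected input and output types.
def to_tensor(data):
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, (list, tuple)):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f"type {type(data)} cannot be converted to tensor.")


img = np.zeros((4, 5, 3), dtype=np.float32)                     # (H, W, C) image
chw = to_tensor(np.ascontiguousarray(img.transpose(2, 0, 1)))   # (C, H, W), as ImageToTensor does
print(chw.shape)       # torch.Size([3, 4, 5])
print(to_tensor(7))    # tensor([7])
print(to_tensor(0.5))  # tensor([0.5000])
```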
shaandesai1/transfer_diffeq
[ "29ab4f3ff16a58bc7b1751428e540a3bb135778c" ]
[ "visualizer.py" ]
[ "\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nfrom neurodiffeq import diff # the differentiation operation\nfrom neurodiffeq.conditions import IVP # the initial condition\nfrom neurodiffeq.networks import FCNN # fully-connect neural network\nfrom neurodiffeq.solvers import Solver1D\nfrom neurodiffeq.callbacks import WeightCallback\nfrom neurodiffeq.callbacks import WeightCallback1, SolutionCallback, SaddleCallback\nfrom neurodiffeq.callbacks import PeriodLocal\nfrom sklearn.metrics import mean_squared_error\n# from sklearn.metrics.pairwise import cosine_similarity\nimport copy\nimport matplotlib.pyplot as plt\n\nDIFFEQS_TRAIN = {\n 'exp': lambda u, t: [diff(u, t) + u],\n 'exp1': lambda u, t: [diff(u, t) - u],\n 'tanh': lambda u, t: [diff(u, t) + u ** 2 - 1],\n 'psig': lambda u, t: [diff(u, t) - 3 * u + u ** 2],\n 'r1': lambda u, t: [diff(u, t) - u + u ** 2 + u ** 3],\n 'r2': lambda u, t: [diff(u, t) + u + u ** 2],\n 'r3': lambda u, t: [diff(u, t) + u ** 2],\n 'r4': lambda u, t: [diff(u, t) - u ** 2],\n 'q1': lambda u, t: [diff(u, t) - u + u ** 2],\n 'q2': lambda u, t: [diff(u, t) - u + u ** 2 - u ** 3],\n 'q3': lambda u, t: [diff(u, t) + u ** 2 + u ** 4],\n 'q4': lambda u, t: [diff(u, t) - u ** 2 - u ** 4],\n 'high_order1': lambda u, t: [diff(u, t) + u - u ** 2 + u ** 3 - u ** 4 + u ** 5],\n 'high_order2': lambda u, t: [diff(u, t) - u + u ** 2 - u ** 3 + u ** 4 - u ** 5],\n 'baseline': lambda u, t: [diff(u,t)]\n}\n\n\nsolsa = np.load('data/q3_train_solution/3000.npy')\nsolsb = np.load('data/baseline_train_solution/3000.npy')\nanalytical =np.load('data/q3_gt_test_solution/3000.npy')\n# pre1 =np.load('data/q2_q2_pretrain_500_solution/500.npy')\n# pre2 =np.load('data/baseline_q2_pretrain_500_solution/500.npy')\n\nplt.figure()\nplt.plot(solsa,label='q2')\nplt.plot(solsb,label='high_order_2')\nplt.plot(analytical,label='analytical_q2')\n# plt.plot(pre1,label='pre_q2_q2')\n# plt.plot(pre2,label='pre_baseline_q2')\nplt.legend()\nplt.show()" ]
[ [ "numpy.load", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.plot" ] ]
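The `visualizer.py` row above loads pre-computed solver outputs from `.npy` files under `data/` and overlays them with matplotlib. Since those data files are not part of this dump, here is a minimal self-contained sketch of the same overlay pattern using synthetic arrays; the curve names and the `t` grid are placeholders, not values from the repository:

```python
import numpy as np
import matplotlib.pyplot as plt

# Synthetic stand-ins for the arrays that visualizer.py loads with np.load(...)
t = np.linspace(0, 2, 200)
solution_a = np.exp(-t)        # placeholder for a trained-solver solution
solution_b = np.ones_like(t)   # placeholder for the baseline solution
analytical = np.exp(-t)        # placeholder for the analytical reference

plt.figure()
plt.plot(t, solution_a, label="trained")
plt.plot(t, solution_b, label="baseline")
plt.plot(t, analytical, "--", label="analytical")
plt.legend()
plt.show()
```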
talahajeer/scikit-learn
[ "d66b42708a5912039740cd08f747229433e579b5", "d66b42708a5912039740cd08f747229433e579b5" ]
[ "sklearn/preprocessing/tests/test_polynomial.py", "sklearn/impute/_base.py" ]
[ "import numpy as np\nimport pytest\nfrom scipy import sparse\nfrom scipy.sparse import random as sparse_random\nfrom sklearn.utils._testing import assert_array_almost_equal\n\nfrom numpy.testing import assert_allclose, assert_array_equal\nfrom scipy.interpolate import BSpline\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import (\n KBinsDiscretizer,\n PolynomialFeatures,\n SplineTransformer,\n)\nfrom sklearn.utils.fixes import linspace, sp_version, parse_version\n\n\[email protected](\"est\", (PolynomialFeatures, SplineTransformer))\ndef test_polynomial_and_spline_array_order(est):\n \"\"\"Test that output array has the given order.\"\"\"\n X = np.arange(10).reshape(5, 2)\n\n def is_c_contiguous(a):\n return np.isfortran(a.T)\n\n assert is_c_contiguous(est().fit_transform(X))\n assert is_c_contiguous(est(order=\"C\").fit_transform(X))\n assert np.isfortran(est(order=\"F\").fit_transform(X))\n\n\[email protected](\n \"params, err_msg\",\n [\n ({\"degree\": -1}, \"degree must be a non-negative integer\"),\n ({\"degree\": 2.5}, \"degree must be a non-negative integer\"),\n ({\"degree\": \"string\"}, \"degree must be a non-negative integer\"),\n ({\"n_knots\": 1}, \"n_knots must be a positive integer >= 2.\"),\n ({\"n_knots\": 1}, \"n_knots must be a positive integer >= 2.\"),\n ({\"n_knots\": 2.5}, \"n_knots must be a positive integer >= 2.\"),\n ({\"n_knots\": \"string\"}, \"n_knots must be a positive integer >= 2.\"),\n ({\"knots\": 1}, \"Expected 2D array, got scalar array instead:\"),\n ({\"knots\": [1, 2]}, \"Expected 2D array, got 1D array instead:\"),\n (\n {\"knots\": [[1]]},\n r\"Number of knots, knots.shape\\[0\\], must be >= 2.\",\n ),\n (\n {\"knots\": [[1, 5], [2, 6]]},\n r\"knots.shape\\[1\\] == n_features is violated.\",\n ),\n (\n {\"knots\": [[1], [1], [2]]},\n \"knots must be sorted without duplicates.\",\n ),\n ({\"knots\": [[2], [1]]}, \"knots must be sorted without duplicates.\"),\n (\n {\"extrapolation\": None},\n \"extrapolation must be one of 'error', 'constant', 'linear', \"\n \"'continue' or 'periodic'.\",\n ),\n (\n {\"extrapolation\": 1},\n \"extrapolation must be one of 'error', 'constant', 'linear', \"\n \"'continue' or 'periodic'.\",\n ),\n (\n {\"extrapolation\": \"string\"},\n \"extrapolation must be one of 'error', 'constant', 'linear', \"\n \"'continue' or 'periodic'.\",\n ),\n ({\"include_bias\": None}, \"include_bias must be bool.\"),\n ({\"include_bias\": 1}, \"include_bias must be bool.\"),\n ({\"include_bias\": \"string\"}, \"include_bias must be bool.\"),\n (\n {\"extrapolation\": \"periodic\", \"n_knots\": 3, \"degree\": 3},\n \"Periodic splines require degree < n_knots. Got n_knots=3 and degree=3.\",\n ),\n (\n {\"extrapolation\": \"periodic\", \"knots\": [[0], [1]], \"degree\": 2},\n \"Periodic splines require degree < n_knots. 
Got n_knots=2 and degree=2.\",\n ),\n ],\n)\ndef test_spline_transformer_input_validation(params, err_msg):\n \"\"\"Test that we raise errors for invalid input in SplineTransformer.\"\"\"\n X = [[1], [2]]\n\n with pytest.raises(ValueError, match=err_msg):\n SplineTransformer(**params).fit(X)\n\n\ndef test_spline_transformer_manual_knot_input():\n \"\"\"\n Test that array-like knot positions in SplineTransformer are accepted.\n \"\"\"\n X = np.arange(20).reshape(10, 2)\n knots = [[0.5, 1], [1.5, 2], [5, 10]]\n st1 = SplineTransformer(degree=3, knots=knots, n_knots=None).fit(X)\n knots = np.asarray(knots)\n st2 = SplineTransformer(degree=3, knots=knots, n_knots=None).fit(X)\n for i in range(X.shape[1]):\n assert_allclose(st1.bsplines_[i].t, st2.bsplines_[i].t)\n\n\[email protected](\"extrapolation\", [\"continue\", \"periodic\"])\ndef test_spline_transformer_integer_knots(extrapolation):\n \"\"\"Test that SplineTransformer accepts integer value knot positions.\"\"\"\n X = np.arange(20).reshape(10, 2)\n knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]]\n _ = SplineTransformer(\n degree=3, knots=knots, extrapolation=extrapolation\n ).fit_transform(X)\n\n\ndef test_spline_transformer_feature_names():\n \"\"\"Test that SplineTransformer generates correct features name.\"\"\"\n X = np.arange(20).reshape(10, 2)\n splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X)\n feature_names = splt.get_feature_names()\n assert_array_equal(\n feature_names,\n [\n \"x0_sp_0\",\n \"x0_sp_1\",\n \"x0_sp_2\",\n \"x0_sp_3\",\n \"x0_sp_4\",\n \"x1_sp_0\",\n \"x1_sp_1\",\n \"x1_sp_2\",\n \"x1_sp_3\",\n \"x1_sp_4\",\n ],\n )\n\n splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X)\n feature_names = splt.get_feature_names([\"a\", \"b\"])\n assert_array_equal(\n feature_names,\n [\n \"a_sp_0\",\n \"a_sp_1\",\n \"a_sp_2\",\n \"a_sp_3\",\n \"b_sp_0\",\n \"b_sp_1\",\n \"b_sp_2\",\n \"b_sp_3\",\n ],\n )\n\n\[email protected](\"degree\", range(1, 5))\[email protected](\"n_knots\", range(3, 5))\[email protected](\"knots\", [\"uniform\", \"quantile\"])\[email protected](\"extrapolation\", [\"constant\", \"periodic\"])\ndef test_spline_transformer_unity_decomposition(degree, n_knots, knots, extrapolation):\n \"\"\"Test that B-splines are indeed a decomposition of unity.\n\n Splines basis functions must sum up to 1 per row, if we stay in between\n boundaries.\n \"\"\"\n X = np.linspace(0, 1, 100)[:, None]\n # make the boundaries 0 and 1 part of X_train, for sure.\n X_train = np.r_[[[0]], X[::2, :], [[1]]]\n X_test = X[1::2, :]\n\n if extrapolation == \"periodic\":\n n_knots = n_knots + degree # periodic splines require degree < n_knots\n\n splt = SplineTransformer(\n n_knots=n_knots,\n degree=degree,\n knots=knots,\n include_bias=True,\n extrapolation=extrapolation,\n )\n splt.fit(X_train)\n for X in [X_train, X_test]:\n assert_allclose(np.sum(splt.transform(X), axis=1), 1)\n\n\[email protected]([\"bias\", \"intercept\"], [(True, False), (False, True)])\ndef test_spline_transformer_linear_regression(bias, intercept):\n \"\"\"Test that B-splines fit a sinusodial curve pretty well.\"\"\"\n X = np.linspace(0, 10, 100)[:, None]\n y = np.sin(X[:, 0]) + 2 # +2 to avoid the value 0 in assert_allclose\n pipe = Pipeline(\n steps=[\n (\n \"spline\",\n SplineTransformer(\n n_knots=15,\n degree=3,\n include_bias=bias,\n extrapolation=\"constant\",\n ),\n ),\n (\"ols\", LinearRegression(fit_intercept=intercept)),\n ]\n )\n pipe.fit(X, y)\n assert_allclose(pipe.predict(X), y, rtol=1e-3)\n\n\[email 
protected](\n \"knots, n_knots, degree\",\n [\n (\"uniform\", 5, 3),\n (\"uniform\", 12, 8),\n (\n [[-1.0, 0.0], [0, 1.0], [0.1, 2.0], [0.2, 3.0], [0.3, 4.0], [1, 5.0]],\n None,\n 3,\n ),\n ],\n)\ndef test_spline_transformer_periodicity_of_extrapolation(knots, n_knots, degree):\n \"\"\"Test that the SplineTransformer is periodic for multiple features.\"\"\"\n X_1 = linspace((-1, 0), (1, 5), 10)\n X_2 = linspace((1, 5), (3, 10), 10)\n\n splt = SplineTransformer(\n knots=knots, n_knots=n_knots, degree=degree, extrapolation=\"periodic\"\n )\n splt.fit(X_1)\n\n assert_allclose(splt.transform(X_1), splt.transform(X_2))\n\n\[email protected]([\"bias\", \"intercept\"], [(True, False), (False, True)])\ndef test_spline_transformer_periodic_linear_regression(bias, intercept):\n \"\"\"Test that B-splines fit a periodic curve pretty well.\"\"\"\n # \"+ 3\" to avoid the value 0 in assert_allclose\n def f(x):\n return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3\n\n X = np.linspace(0, 1, 101)[:, None]\n pipe = Pipeline(\n steps=[\n (\n \"spline\",\n SplineTransformer(\n n_knots=20,\n degree=3,\n include_bias=bias,\n extrapolation=\"periodic\",\n ),\n ),\n (\"ols\", LinearRegression(fit_intercept=intercept)),\n ]\n )\n pipe.fit(X, f(X[:, 0]))\n\n # Generate larger array to check periodic extrapolation\n X_ = np.linspace(-1, 2, 301)[:, None]\n predictions = pipe.predict(X_)\n assert_allclose(predictions, f(X_[:, 0]), atol=0.01, rtol=0.01)\n assert_allclose(predictions[0:100], predictions[100:200], rtol=1e-3)\n\n\[email protected](\n sp_version < parse_version(\"1.0.0\"),\n reason=\"Periodic extrapolation not yet implemented for BSpline.\",\n)\ndef test_spline_transformer_periodic_spline_backport():\n \"\"\"Test that the backport of extrapolate=\"periodic\" works correctly\"\"\"\n X = np.linspace(-2, 3.5, 10)[:, None]\n degree = 2\n\n # Use periodic extrapolation backport in SplineTransformer\n transformer = SplineTransformer(\n degree=degree, extrapolation=\"periodic\", knots=[[-1.0], [0.0], [1.0]]\n )\n Xt = transformer.fit_transform(X)\n\n # Use periodic extrapolation in BSpline\n coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])\n spl = BSpline(np.arange(-3, 4), coef, degree, \"periodic\")\n Xspl = spl(X[:, 0])\n assert_allclose(Xt, Xspl)\n\n\ndef test_spline_transformer_periodic_splines_periodicity():\n \"\"\"\n Test if shifted knots result in the same transformation up to permutation.\n \"\"\"\n X = np.linspace(0, 10, 101)[:, None]\n\n transformer_1 = SplineTransformer(\n degree=3,\n extrapolation=\"periodic\",\n knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],\n )\n\n transformer_2 = SplineTransformer(\n degree=3,\n extrapolation=\"periodic\",\n knots=[[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]],\n )\n\n Xt_1 = transformer_1.fit_transform(X)\n Xt_2 = transformer_2.fit_transform(X)\n\n assert_allclose(Xt_1, Xt_2[:, [4, 0, 1, 2, 3]])\n\n\[email protected](\"degree\", [3, 5])\ndef test_spline_transformer_periodic_splines_smoothness(degree):\n \"\"\"Test that spline transformation is smooth at first / last knot.\"\"\"\n X = np.linspace(-2, 10, 10_000)[:, None]\n\n transformer = SplineTransformer(\n degree=degree,\n extrapolation=\"periodic\",\n knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],\n )\n Xt = transformer.fit_transform(X)\n\n delta = (X.max() - X.min()) / len(X)\n tol = 10 * delta\n\n dXt = Xt\n # We expect splines of degree `degree` to be (`degree`-1) times\n # continuously differentiable. I.e. for d = 0, ..., `degree` - 1 the d-th\n # derivative should be continous. 
This is the case if the (d+1)-th\n # numerical derivative is reasonably small (smaller than `tol` in absolute\n # value). We thus compute d-th numeric derivatives for d = 1, ..., `degree`\n # and compare them to `tol`.\n #\n # Note that the 0-th derivative is the function itself, such that we are\n # also checking its continuity.\n for d in range(1, degree + 1):\n # Check continuity of the (d-1)-th derivative\n diff = np.diff(dXt, axis=0)\n assert np.abs(diff).max() < tol\n # Compute d-th numeric derivative\n dXt = diff / delta\n\n # As degree `degree` splines are not `degree` times continously\n # differentiable at the knots, the `degree + 1`-th numeric derivative\n # should have spikes at the knots.\n diff = np.diff(dXt, axis=0)\n assert np.abs(diff).max() > 1\n\n\[email protected]([\"bias\", \"intercept\"], [(True, False), (False, True)])\[email protected](\"degree\", [1, 2, 3, 4, 5])\ndef test_spline_transformer_extrapolation(bias, intercept, degree):\n \"\"\"Test that B-spline extrapolation works correctly.\"\"\"\n # we use a straight line for that\n X = np.linspace(-1, 1, 100)[:, None]\n y = X.squeeze()\n\n # 'constant'\n pipe = Pipeline(\n [\n [\n \"spline\",\n SplineTransformer(\n n_knots=4,\n degree=degree,\n include_bias=bias,\n extrapolation=\"constant\",\n ),\n ],\n [\"ols\", LinearRegression(fit_intercept=intercept)],\n ]\n )\n pipe.fit(X, y)\n assert_allclose(pipe.predict([[-10], [5]]), [-1, 1])\n\n # 'linear'\n pipe = Pipeline(\n [\n [\n \"spline\",\n SplineTransformer(\n n_knots=4,\n degree=degree,\n include_bias=bias,\n extrapolation=\"linear\",\n ),\n ],\n [\"ols\", LinearRegression(fit_intercept=intercept)],\n ]\n )\n pipe.fit(X, y)\n assert_allclose(pipe.predict([[-10], [5]]), [-10, 5])\n\n # 'error'\n splt = SplineTransformer(\n n_knots=4, degree=degree, include_bias=bias, extrapolation=\"error\"\n )\n splt.fit(X)\n with pytest.raises(ValueError):\n splt.transform([[-10]])\n with pytest.raises(ValueError):\n splt.transform([[5]])\n\n\ndef test_spline_transformer_kbindiscretizer():\n \"\"\"Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer.\"\"\"\n rng = np.random.RandomState(97531)\n X = rng.randn(200).reshape(200, 1)\n n_bins = 5\n n_knots = n_bins + 1\n\n splt = SplineTransformer(\n n_knots=n_knots, degree=0, knots=\"quantile\", include_bias=True\n )\n splines = splt.fit_transform(X)\n\n kbd = KBinsDiscretizer(n_bins=n_bins, encode=\"onehot-dense\", strategy=\"quantile\")\n kbins = kbd.fit_transform(X)\n\n # Though they should be exactly equal, we test approximately with high\n # accuracy.\n assert_allclose(splines, kbins, rtol=1e-13)\n\n\[email protected](\"n_knots\", [5, 10])\[email protected](\"include_bias\", [True, False])\[email protected](\"degree\", [3, 5])\ndef test_spline_transformer_n_features_out(n_knots, include_bias, degree):\n \"\"\"Test that transform results in n_features_out_ features.\"\"\"\n splt = SplineTransformer(n_knots=n_knots, degree=degree, include_bias=include_bias)\n X = np.linspace(0, 1, 10)[:, None]\n splt.fit(X)\n\n assert splt.transform(X).shape[1] == splt.n_features_out_\n\n\[email protected](\n \"params, err_msg\",\n [\n ({\"degree\": -1}, \"degree must be a non-negative integer\"),\n ({\"degree\": 2.5}, \"degree must be a non-negative int or tuple\"),\n ({\"degree\": \"12\"}, r\"degree=\\(min_degree, max_degree\\) must\"),\n ({\"degree\": \"string\"}, \"degree must be a non-negative int or tuple\"),\n ({\"degree\": (-1, 2)}, r\"degree=\\(min_degree, max_degree\\) must\"),\n ({\"degree\": (0, 1.5)}, 
r\"degree=\\(min_degree, max_degree\\) must\"),\n ({\"degree\": (3, 2)}, r\"degree=\\(min_degree, max_degree\\) must\"),\n ],\n)\ndef test_polynomial_features_input_validation(params, err_msg):\n \"\"\"Test that we raise errors for invalid input in PolynomialFeatures.\"\"\"\n X = [[1], [2]]\n\n with pytest.raises(ValueError, match=err_msg):\n PolynomialFeatures(**params).fit(X)\n\n\[email protected]()\ndef single_feature_degree3():\n X = np.arange(6)[:, np.newaxis]\n P = np.hstack([np.ones_like(X), X, X ** 2, X ** 3])\n return X, P\n\n\[email protected](\n \"degree, include_bias, interaction_only, indices\",\n [\n (3, True, False, slice(None, None)),\n (3, False, False, slice(1, None)),\n (3, True, True, [0, 1]),\n (3, False, True, [1]),\n ((2, 3), True, False, [0, 2, 3]),\n ((2, 3), False, False, [2, 3]),\n ((2, 3), True, True, [0]),\n ((2, 3), False, True, []),\n ],\n)\[email protected](\n \"sparse_X\",\n [False, sparse.csr_matrix, sparse.csc_matrix],\n)\ndef test_polynomial_features_one_feature(\n single_feature_degree3,\n degree,\n include_bias,\n interaction_only,\n indices,\n sparse_X,\n):\n \"\"\"Test PolynomialFeatures on single feature up to degree 3.\"\"\"\n X, P = single_feature_degree3\n if sparse_X:\n X = sparse_X(X)\n tf = PolynomialFeatures(\n degree=degree, include_bias=include_bias, interaction_only=interaction_only\n ).fit(X)\n out = tf.transform(X)\n if sparse_X:\n out = out.toarray()\n assert_allclose(out, P[:, indices])\n if tf.n_output_features_ > 0:\n assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)\n\n\[email protected]()\ndef two_features_degree3():\n X = np.arange(6).reshape((3, 2))\n x1 = X[:, :1]\n x2 = X[:, 1:]\n P = np.hstack(\n [\n x1 ** 0 * x2 ** 0, # 0\n x1 ** 1 * x2 ** 0, # 1\n x1 ** 0 * x2 ** 1, # 2\n x1 ** 2 * x2 ** 0, # 3\n x1 ** 1 * x2 ** 1, # 4\n x1 ** 0 * x2 ** 2, # 5\n x1 ** 3 * x2 ** 0, # 6\n x1 ** 2 * x2 ** 1, # 7\n x1 ** 1 * x2 ** 2, # 8\n x1 ** 0 * x2 ** 3, # 9\n ]\n )\n return X, P\n\n\[email protected](\n \"degree, include_bias, interaction_only, indices\",\n [\n (2, True, False, slice(0, 6)),\n (2, False, False, slice(1, 6)),\n (2, True, True, [0, 1, 2, 4]),\n (2, False, True, [1, 2, 4]),\n ((2, 2), True, False, [0, 3, 4, 5]),\n ((2, 2), False, False, [3, 4, 5]),\n ((2, 2), True, True, [0, 4]),\n ((2, 2), False, True, [4]),\n (3, True, False, slice(None, None)),\n (3, False, False, slice(1, None)),\n (3, True, True, [0, 1, 2, 4]),\n (3, False, True, [1, 2, 4]),\n ((2, 3), True, False, [0, 3, 4, 5, 6, 7, 8, 9]),\n ((2, 3), False, False, slice(3, None)),\n ((2, 3), True, True, [0, 4]),\n ((2, 3), False, True, [4]),\n ((3, 3), True, False, [0, 6, 7, 8, 9]),\n ((3, 3), False, False, [6, 7, 8, 9]),\n ((3, 3), True, True, [0]),\n ((3, 3), False, True, []), # would need 3 input features\n ],\n)\[email protected](\n \"sparse_X\",\n [False, sparse.csr_matrix, sparse.csc_matrix],\n)\ndef test_polynomial_features_two_features(\n two_features_degree3,\n degree,\n include_bias,\n interaction_only,\n indices,\n sparse_X,\n):\n \"\"\"Test PolynomialFeatures on 2 features up to degree 3.\"\"\"\n X, P = two_features_degree3\n if sparse_X:\n X = sparse_X(X)\n tf = PolynomialFeatures(\n degree=degree, include_bias=include_bias, interaction_only=interaction_only\n ).fit(X)\n out = tf.transform(X)\n if sparse_X:\n out = out.toarray()\n assert_allclose(out, P[:, indices])\n if tf.n_output_features_ > 0:\n assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)\n\n\ndef test_polynomial_feature_names():\n X = 
np.arange(30).reshape(10, 3)\n poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)\n feature_names = poly.get_feature_names()\n assert_array_equal(\n [\"1\", \"x0\", \"x1\", \"x2\", \"x0^2\", \"x0 x1\", \"x0 x2\", \"x1^2\", \"x1 x2\", \"x2^2\"],\n feature_names,\n )\n assert len(feature_names) == poly.transform(X).shape[1]\n\n poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)\n feature_names = poly.get_feature_names([\"a\", \"b\", \"c\"])\n assert_array_equal(\n [\n \"a\",\n \"b\",\n \"c\",\n \"a^2\",\n \"a b\",\n \"a c\",\n \"b^2\",\n \"b c\",\n \"c^2\",\n \"a^3\",\n \"a^2 b\",\n \"a^2 c\",\n \"a b^2\",\n \"a b c\",\n \"a c^2\",\n \"b^3\",\n \"b^2 c\",\n \"b c^2\",\n \"c^3\",\n ],\n feature_names,\n )\n assert len(feature_names) == poly.transform(X).shape[1]\n\n poly = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X)\n feature_names = poly.get_feature_names([\"a\", \"b\", \"c\"])\n assert_array_equal(\n [\n \"a^2\",\n \"a b\",\n \"a c\",\n \"b^2\",\n \"b c\",\n \"c^2\",\n \"a^3\",\n \"a^2 b\",\n \"a^2 c\",\n \"a b^2\",\n \"a b c\",\n \"a c^2\",\n \"b^3\",\n \"b^2 c\",\n \"b c^2\",\n \"c^3\",\n ],\n feature_names,\n )\n assert len(feature_names) == poly.transform(X).shape[1]\n\n poly = PolynomialFeatures(\n degree=(3, 3), include_bias=True, interaction_only=True\n ).fit(X)\n feature_names = poly.get_feature_names([\"a\", \"b\", \"c\"])\n assert_array_equal([\"1\", \"a b c\"], feature_names)\n assert len(feature_names) == poly.transform(X).shape[1]\n\n # test some unicode\n poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)\n feature_names = poly.get_feature_names([\"\\u0001F40D\", \"\\u262E\", \"\\u05D0\"])\n assert_array_equal([\"1\", \"\\u0001F40D\", \"\\u262E\", \"\\u05D0\"], feature_names)\n\n\[email protected](\n [\"deg\", \"include_bias\", \"interaction_only\", \"dtype\"],\n [\n (1, True, False, int),\n (2, True, False, int),\n (2, True, False, np.float32),\n (2, True, False, np.float64),\n (3, False, False, np.float64),\n (3, False, True, np.float64),\n (4, False, False, np.float64),\n (4, False, True, np.float64),\n ],\n)\ndef test_polynomial_features_csc_X(deg, include_bias, interaction_only, dtype):\n rng = np.random.RandomState(0)\n X = rng.randint(0, 2, (100, 2))\n X_csc = sparse.csc_matrix(X)\n\n est = PolynomialFeatures(\n deg, include_bias=include_bias, interaction_only=interaction_only\n )\n Xt_csc = est.fit_transform(X_csc.astype(dtype))\n Xt_dense = est.fit_transform(X.astype(dtype))\n\n assert isinstance(Xt_csc, sparse.csc_matrix)\n assert Xt_csc.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csc.A, Xt_dense)\n\n\[email protected](\n [\"deg\", \"include_bias\", \"interaction_only\", \"dtype\"],\n [\n (1, True, False, int),\n (2, True, False, int),\n (2, True, False, np.float32),\n (2, True, False, np.float64),\n (3, False, False, np.float64),\n (3, False, True, np.float64),\n ],\n)\ndef test_polynomial_features_csr_X(deg, include_bias, interaction_only, dtype):\n rng = np.random.RandomState(0)\n X = rng.randint(0, 2, (100, 2))\n X_csr = sparse.csr_matrix(X)\n\n est = PolynomialFeatures(\n deg, include_bias=include_bias, interaction_only=interaction_only\n )\n Xt_csr = est.fit_transform(X_csr.astype(dtype))\n Xt_dense = est.fit_transform(X.astype(dtype, copy=False))\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\[email protected](\"n_features\", [1, 4, 5])\[email protected](\n \"min_degree, max_degree\", [(0, 1), (0, 2), (1, 
3), (0, 4), (3, 4)]\n)\[email protected](\"interaction_only\", [True, False])\[email protected](\"include_bias\", [True, False])\ndef test_num_combinations(\n n_features,\n min_degree,\n max_degree,\n interaction_only,\n include_bias,\n):\n \"\"\"\n Test that n_output_features_ is calculated correctly.\n \"\"\"\n x = sparse.csr_matrix(([1], ([0], [n_features - 1])))\n est = PolynomialFeatures(\n degree=max_degree,\n interaction_only=interaction_only,\n include_bias=include_bias,\n )\n est.fit(x)\n num_combos = est.n_output_features_\n\n combos = PolynomialFeatures._combinations(\n n_features=n_features,\n min_degree=0,\n max_degree=max_degree,\n interaction_only=interaction_only,\n include_bias=include_bias,\n )\n assert num_combos == sum([1 for _ in combos])\n\n\[email protected](\n [\"deg\", \"include_bias\", \"interaction_only\", \"dtype\"],\n [\n (2, True, False, np.float32),\n (2, True, False, np.float64),\n (3, False, False, np.float64),\n (3, False, True, np.float64),\n ],\n)\ndef test_polynomial_features_csr_X_floats(deg, include_bias, interaction_only, dtype):\n X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()\n X = X_csr.toarray()\n\n est = PolynomialFeatures(\n deg, include_bias=include_bias, interaction_only=interaction_only\n )\n Xt_csr = est.fit_transform(X_csr.astype(dtype))\n Xt_dense = est.fit_transform(X.astype(dtype))\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\[email protected](\n [\"zero_row_index\", \"deg\", \"interaction_only\"],\n [\n (0, 2, True),\n (1, 2, True),\n (2, 2, True),\n (0, 3, True),\n (1, 3, True),\n (2, 3, True),\n (0, 2, False),\n (1, 2, False),\n (2, 2, False),\n (0, 3, False),\n (1, 3, False),\n (2, 3, False),\n ],\n)\ndef test_polynomial_features_csr_X_zero_row(zero_row_index, deg, interaction_only):\n X_csr = sparse_random(3, 10, 1.0, random_state=0).tocsr()\n X_csr[zero_row_index, :] = 0.0\n X = X_csr.toarray()\n\n est = PolynomialFeatures(deg, include_bias=False, interaction_only=interaction_only)\n Xt_csr = est.fit_transform(X_csr)\n Xt_dense = est.fit_transform(X)\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\n# This degree should always be one more than the highest degree supported by\n# _csr_expansion.\[email protected](\n [\"include_bias\", \"interaction_only\"],\n [(True, True), (True, False), (False, True), (False, False)],\n)\ndef test_polynomial_features_csr_X_degree_4(include_bias, interaction_only):\n X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()\n X = X_csr.toarray()\n\n est = PolynomialFeatures(\n 4, include_bias=include_bias, interaction_only=interaction_only\n )\n Xt_csr = est.fit_transform(X_csr)\n Xt_dense = est.fit_transform(X)\n\n assert isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\[email protected](\n [\"deg\", \"dim\", \"interaction_only\"],\n [\n (2, 1, True),\n (2, 2, True),\n (3, 1, True),\n (3, 2, True),\n (3, 3, True),\n (2, 1, False),\n (2, 2, False),\n (3, 1, False),\n (3, 2, False),\n (3, 3, False),\n ],\n)\ndef test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only):\n X_csr = sparse_random(1000, dim, 0.5, random_state=0).tocsr()\n X = X_csr.toarray()\n\n est = PolynomialFeatures(deg, interaction_only=interaction_only)\n Xt_csr = est.fit_transform(X_csr)\n Xt_dense = est.fit_transform(X)\n\n assert 
isinstance(Xt_csr, sparse.csr_matrix)\n assert Xt_csr.dtype == Xt_dense.dtype\n assert_array_almost_equal(Xt_csr.A, Xt_dense)\n\n\ndef test_polynomial_features_deprecated_n_input_features():\n # check that we raise a deprecation warning when accessing\n # `n_input_features_`. FIXME: remove in 1.2\n depr_msg = (\n \"The attribute `n_input_features_` was deprecated in version \"\n \"1.0 and will be removed in 1.2.\"\n )\n X = np.arange(10).reshape(5, 2)\n\n with pytest.warns(FutureWarning, match=depr_msg):\n PolynomialFeatures().fit(X).n_input_features_\n", "# Authors: Nicolas Tresegnie <[email protected]>\n# Sergey Feldman <[email protected]>\n# License: BSD 3 clause\n\nimport numbers\nimport warnings\nfrom collections import Counter\n\nimport numpy as np\nimport numpy.ma as ma\nfrom scipy import sparse as sp\nfrom scipy import stats\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..utils.sparsefuncs import _get_median\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import FLOAT_DTYPES\nfrom ..utils._mask import _get_mask\nfrom ..utils import is_scalar_nan\n\n\ndef _check_inputs_dtype(X, missing_values):\n if X.dtype.kind in (\"f\", \"i\", \"u\") and not isinstance(missing_values, numbers.Real):\n raise ValueError(\n \"'X' and 'missing_values' types are expected to be\"\n \" both numerical. Got X.dtype={} and \"\n \" type(missing_values)={}.\".format(X.dtype, type(missing_values))\n )\n\n\ndef _most_frequent(array, extra_value, n_repeat):\n \"\"\"Compute the most frequent value in a 1d array extended with\n [extra_value] * n_repeat, where extra_value is assumed to be not part\n of the array.\"\"\"\n # Compute the most frequent value in array only\n if array.size > 0:\n if array.dtype == object:\n # scipy.stats.mode is slow with object dtype array.\n # Python Counter is more efficient\n counter = Counter(array)\n most_frequent_count = counter.most_common(1)[0][1]\n # tie breaking similarly to scipy.stats.mode\n most_frequent_value = min(\n value\n for value, count in counter.items()\n if count == most_frequent_count\n )\n else:\n mode = stats.mode(array)\n most_frequent_value = mode[0][0]\n most_frequent_count = mode[1][0]\n else:\n most_frequent_value = 0\n most_frequent_count = 0\n\n # Compare to array + [extra_value] * n_repeat\n if most_frequent_count == 0 and n_repeat == 0:\n return np.nan\n elif most_frequent_count < n_repeat:\n return extra_value\n elif most_frequent_count > n_repeat:\n return most_frequent_value\n elif most_frequent_count == n_repeat:\n # tie breaking similarly to scipy.stats.mode\n return min(most_frequent_value, extra_value)\n\n\nclass _BaseImputer(TransformerMixin, BaseEstimator):\n \"\"\"Base class for all imputers.\n\n It adds automatically support for `add_indicator`.\n \"\"\"\n\n def __init__(self, *, missing_values=np.nan, add_indicator=False):\n self.missing_values = missing_values\n self.add_indicator = add_indicator\n\n def _fit_indicator(self, X):\n \"\"\"Fit a MissingIndicator.\"\"\"\n if self.add_indicator:\n self.indicator_ = MissingIndicator(\n missing_values=self.missing_values, error_on_new=False\n )\n self.indicator_._fit(X, precomputed=True)\n else:\n self.indicator_ = None\n\n def _transform_indicator(self, X):\n \"\"\"Compute the indicator mask.'\n\n Note that X must be the original data as passed to the imputer before\n any imputation, since imputation may be done inplace in some cases.\n \"\"\"\n if self.add_indicator:\n if not hasattr(self, \"indicator_\"):\n raise ValueError(\n \"Make sure to call 
_fit_indicator before _transform_indicator\"\n )\n return self.indicator_.transform(X)\n\n def _concatenate_indicator(self, X_imputed, X_indicator):\n \"\"\"Concatenate indicator mask with the imputed data.\"\"\"\n if not self.add_indicator:\n return X_imputed\n\n hstack = sp.hstack if sp.issparse(X_imputed) else np.hstack\n if X_indicator is None:\n raise ValueError(\n \"Data from the missing indicator are not provided. Call \"\n \"_fit_indicator and _transform_indicator in the imputer \"\n \"implementation.\"\n )\n\n return hstack((X_imputed, X_indicator))\n\n def _more_tags(self):\n return {\"allow_nan\": is_scalar_nan(self.missing_values)}\n\n\nclass SimpleImputer(_BaseImputer):\n \"\"\"Imputation transformer for completing missing values.\n\n Read more in the :ref:`User Guide <impute>`.\n\n .. versionadded:: 0.20\n `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`\n estimator which is now removed.\n\n Parameters\n ----------\n missing_values : int, float, str, np.nan or None, default=np.nan\n The placeholder for the missing values. All occurrences of\n `missing_values` will be imputed. For pandas' dataframes with\n nullable integer dtypes with missing values, `missing_values`\n should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.\n\n strategy : string, default='mean'\n The imputation strategy.\n\n - If \"mean\", then replace missing values using the mean along\n each column. Can only be used with numeric data.\n - If \"median\", then replace missing values using the median along\n each column. Can only be used with numeric data.\n - If \"most_frequent\", then replace missing using the most frequent\n value along each column. Can be used with strings or numeric data.\n If there is more than one such value, only the smallest is returned.\n - If \"constant\", then replace missing values with fill_value. Can be\n used with strings or numeric data.\n\n .. versionadded:: 0.20\n strategy=\"constant\" for fixed value imputation.\n\n fill_value : string or numerical value, default=None\n When strategy == \"constant\", fill_value is used to replace all\n occurrences of missing_values.\n If left to the default, fill_value will be 0 when imputing numerical\n data and \"missing_value\" for strings or object data types.\n\n verbose : integer, default=0\n Controls the verbosity of the imputer.\n\n copy : boolean, default=True\n If True, a copy of X will be created. If False, imputation will\n be done in-place whenever possible. Note that, in the following cases,\n a new copy will always be made, even if `copy=False`:\n\n - If X is not an array of floating values;\n - If X is encoded as a CSR matrix;\n - If add_indicator=True.\n\n add_indicator : boolean, default=False\n If True, a :class:`MissingIndicator` transform will stack onto output\n of the imputer's transform. This allows a predictive estimator\n to account for missingness despite imputation. 
If a feature has no\n missing values at fit/train time, the feature won't appear on\n the missing indicator even if there are missing values at\n transform/test time.\n\n Attributes\n ----------\n statistics_ : array of shape (n_features,)\n The imputation fill value for each feature.\n Computing statistics can result in `np.nan` values.\n During :meth:`transform`, features corresponding to `np.nan`\n statistics will be discarded.\n\n indicator_ : :class:`~sklearn.impute.MissingIndicator`\n Indicator used to add binary indicators for missing values.\n ``None`` if add_indicator is False.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n See Also\n --------\n IterativeImputer : Multivariate imputation of missing values.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.impute import SimpleImputer\n >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')\n >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])\n SimpleImputer()\n >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]\n >>> print(imp_mean.transform(X))\n [[ 7. 2. 3. ]\n [ 4. 3.5 6. ]\n [10. 3.5 9. ]]\n\n Notes\n -----\n Columns which only contained missing values at :meth:`fit` are discarded\n upon :meth:`transform` if strategy is not \"constant\".\n\n \"\"\"\n\n def __init__(\n self,\n *,\n missing_values=np.nan,\n strategy=\"mean\",\n fill_value=None,\n verbose=0,\n copy=True,\n add_indicator=False,\n ):\n super().__init__(missing_values=missing_values, add_indicator=add_indicator)\n self.strategy = strategy\n self.fill_value = fill_value\n self.verbose = verbose\n self.copy = copy\n\n def _validate_input(self, X, in_fit):\n allowed_strategies = [\"mean\", \"median\", \"most_frequent\", \"constant\"]\n if self.strategy not in allowed_strategies:\n raise ValueError(\n \"Can only use these strategies: {0} got strategy={1}\".format(\n allowed_strategies, self.strategy\n )\n )\n\n if self.strategy in (\"most_frequent\", \"constant\"):\n # If input is a list of strings, dtype = object.\n # Otherwise ValueError is raised in SimpleImputer\n # with strategy='most_frequent' or 'constant'\n # because the list is converted to Unicode numpy array\n if isinstance(X, list) and any(\n isinstance(elem, str) for row in X for elem in row\n ):\n dtype = object\n else:\n dtype = None\n else:\n dtype = FLOAT_DTYPES\n\n if not is_scalar_nan(self.missing_values):\n force_all_finite = True\n else:\n force_all_finite = \"allow-nan\"\n\n try:\n X = self._validate_data(\n X,\n reset=in_fit,\n accept_sparse=\"csc\",\n dtype=dtype,\n force_all_finite=force_all_finite,\n copy=self.copy,\n )\n except ValueError as ve:\n if \"could not convert\" in str(ve):\n new_ve = ValueError(\n \"Cannot use {} strategy with non-numeric data:\\n{}\".format(\n self.strategy, ve\n )\n )\n raise new_ve from None\n else:\n raise ve\n\n _check_inputs_dtype(X, self.missing_values)\n if X.dtype.kind not in (\"i\", \"u\", \"f\", \"O\"):\n raise ValueError(\n \"SimpleImputer does not support data with dtype \"\n \"{0}. 
Please provide either a numeric array (with\"\n \" a floating point or integer dtype) or \"\n \"categorical data represented either as an array \"\n \"with integer dtype or an array of string values \"\n \"with an object dtype.\".format(X.dtype)\n )\n\n return X\n\n def fit(self, X, y=None):\n \"\"\"Fit the imputer on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n Returns\n -------\n self : SimpleImputer\n \"\"\"\n X = self._validate_input(X, in_fit=True)\n\n # default fill_value is 0 for numerical input and \"missing_value\"\n # otherwise\n if self.fill_value is None:\n if X.dtype.kind in (\"i\", \"u\", \"f\"):\n fill_value = 0\n else:\n fill_value = \"missing_value\"\n else:\n fill_value = self.fill_value\n\n # fill_value should be numerical in case of numerical input\n if (\n self.strategy == \"constant\"\n and X.dtype.kind in (\"i\", \"u\", \"f\")\n and not isinstance(fill_value, numbers.Real)\n ):\n raise ValueError(\n \"'fill_value'={0} is invalid. Expected a \"\n \"numerical value when imputing numerical \"\n \"data\".format(fill_value)\n )\n\n if sp.issparse(X):\n # missing_values = 0 not allowed with sparse data as it would\n # force densification\n if self.missing_values == 0:\n raise ValueError(\n \"Imputation not possible when missing_values \"\n \"== 0 and input is sparse. Provide a dense \"\n \"array instead.\"\n )\n else:\n self.statistics_ = self._sparse_fit(\n X, self.strategy, self.missing_values, fill_value\n )\n\n else:\n self.statistics_ = self._dense_fit(\n X, self.strategy, self.missing_values, fill_value\n )\n\n return self\n\n def _sparse_fit(self, X, strategy, missing_values, fill_value):\n \"\"\"Fit the transformer on sparse data.\"\"\"\n missing_mask = _get_mask(X, missing_values)\n mask_data = missing_mask.data\n n_implicit_zeros = X.shape[0] - np.diff(X.indptr)\n\n statistics = np.empty(X.shape[1])\n\n if strategy == \"constant\":\n # for constant strategy, self.statistcs_ is used to store\n # fill_value in each column\n statistics.fill(fill_value)\n else:\n for i in range(X.shape[1]):\n column = X.data[X.indptr[i] : X.indptr[i + 1]]\n mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]\n column = column[~mask_column]\n\n # combine explicit and implicit zeros\n mask_zeros = _get_mask(column, 0)\n column = column[~mask_zeros]\n n_explicit_zeros = mask_zeros.sum()\n n_zeros = n_implicit_zeros[i] + n_explicit_zeros\n\n if strategy == \"mean\":\n s = column.size + n_zeros\n statistics[i] = np.nan if s == 0 else column.sum() / s\n\n elif strategy == \"median\":\n statistics[i] = _get_median(column, n_zeros)\n\n elif strategy == \"most_frequent\":\n statistics[i] = _most_frequent(column, 0, n_zeros)\n super()._fit_indicator(missing_mask)\n\n return statistics\n\n def _dense_fit(self, X, strategy, missing_values, fill_value):\n \"\"\"Fit the transformer on dense data.\"\"\"\n missing_mask = _get_mask(X, missing_values)\n masked_X = ma.masked_array(X, mask=missing_mask)\n\n super()._fit_indicator(missing_mask)\n\n # Mean\n if strategy == \"mean\":\n mean_masked = np.ma.mean(masked_X, axis=0)\n # Avoid the warning \"Warning: converting a masked element to nan.\"\n mean = np.ma.getdata(mean_masked)\n mean[np.ma.getmask(mean_masked)] = np.nan\n\n return mean\n\n # Median\n elif strategy == \"median\":\n median_masked = np.ma.median(masked_X, axis=0)\n # Avoid the warning \"Warning: converting a masked element 
to nan.\"\n median = np.ma.getdata(median_masked)\n median[np.ma.getmaskarray(median_masked)] = np.nan\n\n return median\n\n # Most frequent\n elif strategy == \"most_frequent\":\n # Avoid use of scipy.stats.mstats.mode due to the required\n # additional overhead and slow benchmarking performance.\n # See Issue 14325 and PR 14399 for full discussion.\n\n # To be able access the elements by columns\n X = X.transpose()\n mask = missing_mask.transpose()\n\n if X.dtype.kind == \"O\":\n most_frequent = np.empty(X.shape[0], dtype=object)\n else:\n most_frequent = np.empty(X.shape[0])\n\n for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):\n row_mask = np.logical_not(row_mask).astype(bool)\n row = row[row_mask]\n most_frequent[i] = _most_frequent(row, np.nan, 0)\n\n return most_frequent\n\n # Constant\n elif strategy == \"constant\":\n # for constant strategy, self.statistcs_ is used to store\n # fill_value in each column\n return np.full(X.shape[1], fill_value, dtype=X.dtype)\n\n def transform(self, X):\n \"\"\"Impute all missing values in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n X_imputed : {ndarray, sparse matrix} of shape \\\n (n_samples, n_features_out)\n `X` with imputed values.\n \"\"\"\n check_is_fitted(self)\n\n X = self._validate_input(X, in_fit=False)\n statistics = self.statistics_\n\n if X.shape[1] != statistics.shape[0]:\n raise ValueError(\n \"X has %d features per sample, expected %d\"\n % (X.shape[1], self.statistics_.shape[0])\n )\n\n # compute mask before eliminating invalid features\n missing_mask = _get_mask(X, self.missing_values)\n\n # Delete the invalid columns if strategy is not constant\n if self.strategy == \"constant\":\n valid_statistics = statistics\n valid_statistics_indexes = None\n else:\n # same as np.isnan but also works for object dtypes\n invalid_mask = _get_mask(statistics, np.nan)\n valid_mask = np.logical_not(invalid_mask)\n valid_statistics = statistics[valid_mask]\n valid_statistics_indexes = np.flatnonzero(valid_mask)\n\n if invalid_mask.any():\n missing = np.arange(X.shape[1])[invalid_mask]\n if self.verbose:\n warnings.warn(\n \"Deleting features without observed values: %s\" % missing\n )\n X = X[:, valid_statistics_indexes]\n\n # Do actual imputation\n if sp.issparse(X):\n if self.missing_values == 0:\n raise ValueError(\n \"Imputation not possible when missing_values \"\n \"== 0 and input is sparse. 
Provide a dense \"\n \"array instead.\"\n )\n else:\n # if no invalid statistics are found, use the mask computed\n # before, else recompute mask\n if valid_statistics_indexes is None:\n mask = missing_mask.data\n else:\n mask = _get_mask(X.data, self.missing_values)\n indexes = np.repeat(\n np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)\n )[mask]\n\n X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)\n else:\n # use mask computed before eliminating invalid mask\n if valid_statistics_indexes is None:\n mask_valid_features = missing_mask\n else:\n mask_valid_features = missing_mask[:, valid_statistics_indexes]\n n_missing = np.sum(mask_valid_features, axis=0)\n values = np.repeat(valid_statistics, n_missing)\n coordinates = np.where(mask_valid_features.transpose())[::-1]\n\n X[coordinates] = values\n\n X_indicator = super()._transform_indicator(missing_mask)\n\n return super()._concatenate_indicator(X, X_indicator)\n\n def inverse_transform(self, X):\n \"\"\"Convert the data back to the original representation.\n\n Inverts the `transform` operation performed on an array.\n This operation can only be performed after :class:`SimpleImputer` is\n instantiated with `add_indicator=True`.\n\n Note that ``inverse_transform`` can only invert the transform in\n features that have binary indicators for missing values. If a feature\n has no missing values at ``fit`` time, the feature won't have a binary\n indicator, and the imputation done at ``transform`` time won't be\n inverted.\n\n .. versionadded:: 0.24\n\n Parameters\n ----------\n X : array-like of shape \\\n (n_samples, n_features + n_features_missing_indicator)\n The imputed data to be reverted to original data. It has to be\n an augmented array of imputed data and the missing indicator mask.\n\n Returns\n -------\n X_original : ndarray of shape (n_samples, n_features)\n The original X with missing values as it was prior\n to imputation.\n \"\"\"\n check_is_fitted(self)\n\n if not self.add_indicator:\n raise ValueError(\n \"'inverse_transform' works only when \"\n \"'SimpleImputer' is instantiated with \"\n \"'add_indicator=True'. \"\n f\"Got 'add_indicator={self.add_indicator}' \"\n \"instead.\"\n )\n\n n_features_missing = len(self.indicator_.features_)\n non_empty_feature_count = X.shape[1] - n_features_missing\n array_imputed = X[:, :non_empty_feature_count].copy()\n missing_mask = X[:, non_empty_feature_count:].astype(bool)\n\n n_features_original = len(self.statistics_)\n shape_original = (X.shape[0], n_features_original)\n X_original = np.zeros(shape_original)\n X_original[:, self.indicator_.features_] = missing_mask\n full_mask = X_original.astype(bool)\n\n imputed_idx, original_idx = 0, 0\n while imputed_idx < len(array_imputed.T):\n if not np.all(X_original[:, original_idx]):\n X_original[:, original_idx] = array_imputed.T[imputed_idx]\n imputed_idx += 1\n original_idx += 1\n else:\n original_idx += 1\n\n X_original[full_mask] = self.missing_values\n return X_original\n\n\nclass MissingIndicator(TransformerMixin, BaseEstimator):\n \"\"\"Binary indicators for missing values.\n\n Note that this component typically should not be used in a vanilla\n :class:`Pipeline` consisting of transformers and a classifier, but rather\n could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`.\n\n Read more in the :ref:`User Guide <impute>`.\n\n .. 
versionadded:: 0.20\n\n Parameters\n ----------\n missing_values : int, float, string, np.nan or None, default=np.nan\n The placeholder for the missing values. All occurrences of\n `missing_values` will be imputed. For pandas' dataframes with\n nullable integer dtypes with missing values, `missing_values`\n should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.\n\n features : {'missing-only', 'all'}, default='missing-only'\n Whether the imputer mask should represent all or a subset of\n features.\n\n - If 'missing-only' (default), the imputer mask will only represent\n features containing missing values during fit time.\n - If 'all', the imputer mask will represent all features.\n\n sparse : bool or 'auto', default='auto'\n Whether the imputer mask format should be sparse or dense.\n\n - If 'auto' (default), the imputer mask will be of same type as\n input.\n - If True, the imputer mask will be a sparse matrix.\n - If False, the imputer mask will be a numpy array.\n\n error_on_new : bool, default=True\n If True, transform will raise an error when there are features with\n missing values in transform that have no missing values in fit. This is\n applicable only when `features='missing-only'`.\n\n Attributes\n ----------\n features_ : ndarray, shape (n_missing_features,) or (n_features,)\n The features indices which will be returned when calling ``transform``.\n They are computed during ``fit``. For ``features='all'``, it is\n to ``range(n_features)``.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.impute import MissingIndicator\n >>> X1 = np.array([[np.nan, 1, 3],\n ... [4, 0, np.nan],\n ... [8, 1, 0]])\n >>> X2 = np.array([[5, 1, np.nan],\n ... [np.nan, 2, 3],\n ... [2, 4, 0]])\n >>> indicator = MissingIndicator()\n >>> indicator.fit(X1)\n MissingIndicator()\n >>> X2_tr = indicator.transform(X2)\n >>> X2_tr\n array([[False, True],\n [ True, False],\n [False, False]])\n\n \"\"\"\n\n def __init__(\n self,\n *,\n missing_values=np.nan,\n features=\"missing-only\",\n sparse=\"auto\",\n error_on_new=True,\n ):\n self.missing_values = missing_values\n self.features = features\n self.sparse = sparse\n self.error_on_new = error_on_new\n\n def _get_missing_features_info(self, X):\n \"\"\"Compute the imputer mask and the indices of the features\n containing missing values.\n\n Parameters\n ----------\n X : {ndarray or sparse matrix}, shape (n_samples, n_features)\n The input data with missing values. 
Note that ``X`` has been\n checked in ``fit`` and ``transform`` before to call this function.\n\n Returns\n -------\n imputer_mask : {ndarray or sparse matrix}, shape \\\n (n_samples, n_features)\n The imputer mask of the original data.\n\n features_with_missing : ndarray, shape (n_features_with_missing)\n The features containing missing values.\n\n \"\"\"\n if not self._precomputed:\n imputer_mask = _get_mask(X, self.missing_values)\n else:\n imputer_mask = X\n\n if sp.issparse(X):\n imputer_mask.eliminate_zeros()\n\n if self.features == \"missing-only\":\n n_missing = imputer_mask.getnnz(axis=0)\n\n if self.sparse is False:\n imputer_mask = imputer_mask.toarray()\n elif imputer_mask.format == \"csr\":\n imputer_mask = imputer_mask.tocsc()\n else:\n if not self._precomputed:\n imputer_mask = _get_mask(X, self.missing_values)\n else:\n imputer_mask = X\n\n if self.features == \"missing-only\":\n n_missing = imputer_mask.sum(axis=0)\n\n if self.sparse is True:\n imputer_mask = sp.csc_matrix(imputer_mask)\n\n if self.features == \"all\":\n features_indices = np.arange(X.shape[1])\n else:\n features_indices = np.flatnonzero(n_missing)\n\n return imputer_mask, features_indices\n\n def _validate_input(self, X, in_fit):\n if not is_scalar_nan(self.missing_values):\n force_all_finite = True\n else:\n force_all_finite = \"allow-nan\"\n X = self._validate_data(\n X,\n reset=in_fit,\n accept_sparse=(\"csc\", \"csr\"),\n dtype=None,\n force_all_finite=force_all_finite,\n )\n _check_inputs_dtype(X, self.missing_values)\n if X.dtype.kind not in (\"i\", \"u\", \"f\", \"O\"):\n raise ValueError(\n \"MissingIndicator does not support data with \"\n \"dtype {0}. Please provide either a numeric array\"\n \" (with a floating point or integer dtype) or \"\n \"categorical data represented either as an array \"\n \"with integer dtype or an array of string values \"\n \"with an object dtype.\".format(X.dtype)\n )\n\n if sp.issparse(X) and self.missing_values == 0:\n # missing_values = 0 not allowed with sparse data as it would\n # force densification\n raise ValueError(\n \"Sparse input with missing_values=0 is \"\n \"not supported. Provide a dense \"\n \"array instead.\"\n )\n\n return X\n\n def _fit(self, X, y=None, precomputed=False):\n \"\"\"Fit the transformer on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n If `precomputed` is True, then `X` is a mask of the\n input data.\n\n precomputed : bool\n Whether the input data is a mask.\n\n Returns\n -------\n imputer_mask : {ndarray or sparse matrix}, shape (n_samples, \\\n n_features)\n The imputer mask of the original data.\n\n \"\"\"\n if precomputed:\n if not (hasattr(X, \"dtype\") and X.dtype.kind == \"b\"):\n raise ValueError(\"precomputed is True but the input data is not a mask\")\n self._precomputed = True\n else:\n self._precomputed = False\n\n # Need not validate X again as it would have already been validated\n # in the Imputer calling MissingIndicator\n if not self._precomputed:\n X = self._validate_input(X, in_fit=True)\n\n self._n_features = X.shape[1]\n\n if self.features not in (\"missing-only\", \"all\"):\n raise ValueError(\n \"'features' has to be either 'missing-only' or \"\n \"'all'. 
Got {} instead.\".format(self.features)\n )\n\n if not (\n (isinstance(self.sparse, str) and self.sparse == \"auto\")\n or isinstance(self.sparse, bool)\n ):\n raise ValueError(\n \"'sparse' has to be a boolean or 'auto'. Got {!r} instead.\".format(\n self.sparse\n )\n )\n\n missing_features_info = self._get_missing_features_info(X)\n self.features_ = missing_features_info[1]\n\n return missing_features_info[0]\n\n def fit(self, X, y=None):\n \"\"\"Fit the transformer on X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n self._fit(X, y)\n\n return self\n\n def transform(self, X):\n \"\"\"Generate missing values indicator for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n Xt : {ndarray or sparse matrix}, shape (n_samples, n_features) \\\n or (n_samples, n_features_with_missing)\n The missing indicator for input data. The data type of ``Xt``\n will be boolean.\n\n \"\"\"\n check_is_fitted(self)\n\n # Need not validate X again as it would have already been validated\n # in the Imputer calling MissingIndicator\n if not self._precomputed:\n X = self._validate_input(X, in_fit=False)\n else:\n if not (hasattr(X, \"dtype\") and X.dtype.kind == \"b\"):\n raise ValueError(\"precomputed is True but the input data is not a mask\")\n\n imputer_mask, features = self._get_missing_features_info(X)\n\n if self.features == \"missing-only\":\n features_diff_fit_trans = np.setdiff1d(features, self.features_)\n if self.error_on_new and features_diff_fit_trans.size > 0:\n raise ValueError(\n \"The features {} have missing values \"\n \"in transform but have no missing values \"\n \"in fit.\".format(features_diff_fit_trans)\n )\n\n if self.features_.size < self._n_features:\n imputer_mask = imputer_mask[:, self.features_]\n\n return imputer_mask\n\n def fit_transform(self, X, y=None):\n \"\"\"Generate missing values indicator for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n Xt : {ndarray or sparse matrix}, shape (n_samples, n_features) \\\n or (n_samples, n_features_with_missing)\n The missing indicator for input data. The data type of ``Xt``\n will be boolean.\n\n \"\"\"\n imputer_mask = self._fit(X, y)\n\n if self.features_.size < self._n_features:\n imputer_mask = imputer_mask[:, self.features_]\n\n return imputer_mask\n\n def _more_tags(self):\n return {\n \"allow_nan\": True,\n \"X_types\": [\"2darray\", \"string\"],\n \"preserves_dtype\": [],\n }\n" ]
[ [ "numpy.diff", "numpy.ones_like", "numpy.asarray", "numpy.random.RandomState", "sklearn.utils.fixes.parse_version", "scipy.sparse.csc_matrix", "sklearn.linear_model.LinearRegression", "sklearn.preprocessing.PolynomialFeatures._combinations", "numpy.testing.assert_array_equal", "numpy.abs", "numpy.linspace", "sklearn.utils._testing.assert_array_almost_equal", "sklearn.preprocessing.KBinsDiscretizer", "numpy.arange", "numpy.hstack", "sklearn.utils.fixes.linspace", "scipy.sparse.random", "sklearn.preprocessing.SplineTransformer", "scipy.sparse.csr_matrix", "numpy.isfortran", "numpy.testing.assert_allclose", "numpy.array", "numpy.sin", "sklearn.preprocessing.PolynomialFeatures" ], [ "numpy.sum", "scipy.stats.mode", "numpy.diff", "numpy.ma.getmaskarray", "numpy.ma.masked_array", "scipy.sparse.csc_matrix", "numpy.logical_not", "numpy.flatnonzero", "numpy.ma.getdata", "numpy.ma.median", "numpy.ma.mean", "numpy.zeros", "numpy.ma.getmask", "numpy.setdiff1d", "numpy.repeat", "numpy.arange", "numpy.all", "numpy.empty", "scipy.sparse.issparse", "numpy.full" ] ]
tsoonjin/selam
[ "fbbb355490271bf09056e05b23245be1b75ae24d" ]
[ "selam/prepdata.py" ]
[ "#!/bin/bash\nimport os\nimport sys\nimport random\nimport cv2\nimport numpy as np\nimport xgboost as xgb\n\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA, NMF\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom selam.utils import img\n\n\ndef sample_negative(img, rect, n=1, size=(100, 100)):\n \"\"\" Sample n negative samples randomly\n @param rect: [x1, y1, x2, y2]\n @param n: number of negative samples\n @param size: size of negative window\n \"\"\"\n samples = []\n maxHeight, maxWidth = img.shape[:-1]\n width = abs(rect[0] - rect[2])\n height = abs(rect[1] - rect[3])\n while len(samples) != n:\n tmpX = int(random.random() * (maxWidth - width))\n tmpY = int(random.random() * (maxHeight - height))\n isNotOverlapX = tmpX + width < rect[0] or tmpX > rect[2]\n isNotOverlapY = tmpY + height < rect[1] or tmpY > rect[3]\n # Only accepts sample that does not overlap with ground truth\n if isNotOverlapX and isNotOverlapY:\n samples.append(cv2.resize(\n img[tmpY: tmpY + height, tmpX: tmpX + width], size))\n return samples\n\ndef get_roi(img, rect, size=(100, 100)):\n \"\"\" Return extracted bounding box given 4 corners of a rectangle\n size: size of training image\n @return roi, [x1, y1, x2, y2]\n \"\"\"\n xpos = rect[0::2]\n ypos = rect[1::2]\n y = [int(min(ypos)), int(max(ypos))]\n x = [int(min(xpos)), int(max(xpos))]\n roi = img[y[0]:y[1], x[0]:x[1]]\n return cv2.resize(roi, size), [x[0], y[0], x[1], y[1]]\n\n\ndef get_jpgs(dirpath, skip=0, resize=None):\n \"\"\" Returns all images located in given dirpath\n skip : number of frames skip to reduce computation time\n resize: scale factor for resize\n\n \"\"\"\n filenames = os.listdir(dirpath)\n # Only attempt to parse and sort files that end with .jpg\n filenames = [filename for filename in filenames\n if filename.endswith(\".jpg\") or filename.endswith(\".png\")]\n filenames.sort(key=lambda x: int(x.split('.', 1)[0]))\n frames = [cv2.imread('{}/{}'.format(dirpath, filename))\n for filename in filenames]\n out = frames[0::skip] if skip > 0 else frames\n print('Read {} images from {}'.format(len(out), dirpath))\n if resize:\n new_size = (out[0].shape[1] / resize, out[0].shape[0] / resize)\n return map(lambda x: cv2.resize(x, new_size), out)\n return out\n\n\ndef extract_training(dataset_path, annotation):\n \"\"\" Returns a list of labelled images as positive training data\n Uses default size of 100 x 100 as training patch\n @return positive samples, negative samples\n \"\"\"\n positives = []\n negatives = []\n imgs = get_jpgs(dataset_path)\n with open(annotation) as ann:\n for i, label in zip(imgs, ann):\n rect = map(float, label.rstrip().split(','))\n if rect[0] > 0:\n roi, coord = get_roi(i, rect)\n negatives.extend(sample_negative(i, coord))\n positives.append(roi)\n print(\"{} positive samples\".format(len(positives)))\n print(\"{} negative samples\".format(len(negatives)))\n return positives, negatives\n\n\ndef augment_data(imgs, augment_dir, prefix, n=20):\n \"\"\" Augment imgs with various transformations \n @param augment_dir: directory to save augmented images\n @param prefix: prefix of filename\n @param n: number of transformations per image\n \"\"\"\n n_samples = len(imgs)\n datagen = ImageDataGenerator(\n rotation_range=90,\n 
width_shift_range=0.2,\n height_shift_range=0.2,\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n\n for i in imgs:\n selected = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)\n selected = selected.reshape((1, ) + selected.shape)\n for x, batch in enumerate(datagen.flow(selected, batch_size=1,\n save_to_dir=augment_dir,\n save_prefix=prefix,\n save_format='jpeg')):\n if x > n:\n break\n\n\ndef kfold(x, y, eval_size=0.10):\n \"\"\" Split dataset into training set and validation set\n @param eval_size: percentage of data used for evaluation\n @return X_train, X_valid, Y_train, Y_valid\n \"\"\"\n return train_test_split(x, y, test_size=eval_size, random_state=0)\n\n\ndef std_zscore(X):\n \"\"\" Z-score standardization by subtracting mean and divided by standard\n deviation of dataset\n \"\"\"\n scaler = preprocessing.StandardScaler().fit(X)\n return scaler.transform(X)\n\n\ndef std_minmax(X):\n scaler = preprocessing.MinMaxScaler().fit(X)\n return scaler.transform(X)\n\n\ndef reduce_pca(X, h, w, n=15, display=True):\n \"\"\" Performs PCA decomposition using n components \"\"\"\n pca = PCA(n_components=n, svd_solver='randomized',\n whiten=True).fit(X)\n eigenfaces = pca.components_.reshape((n, h, w, -1))\n if display:\n for i in eigenfaces:\n cv2.imshow('PC', np.uint8(img.normUnity(np.mean(i, axis=2)) * 255))\n cv2.waitKey(0)\n return pca.transform(X)\n\n\ndef reduce_nmf(X, h, w, n=15, display=False):\n \"\"\" Performs Non-negative matrix factorization using n components \"\"\"\n model = NMF(n_components=n, init='random', random_state=0).fit(X)\n components = model.components_.reshape((n, h, w, -1))\n if display:\n for i in components:\n cv2.imshow('PC', np.uint8(img.normUnity(np.mean(i, axis=2)) * 255))\n cv2.waitKey(0)\n return model.transform(X)\n\n\ndef classify_svm(X_train, Y_train):\n param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}\n clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)\n clf.fit(X_train, Y_train)\n return clf\n\n\ndef classify_rf(X_train, Y_train):\n param_grid = {'n_estimators': [50, 200, 700],\n 'max_features': ['auto', 'sqrt', 'log2']}\n clf = GridSearchCV(RandomForestClassifier(n_estimators=500, oob_score=True), param_grid)\n clf.fit(X_train, Y_train)\n return clf\n\n\ndef classify_gp(X, Y):\n # Using same lengthscale for all features\n kernel = 1.0 * RBF([1.0])\n gpc_rbf = GaussianProcessClassifier(kernel=kernel).fit(X, Y)\n return gpc_rbf\n\n\ndef classify_xgb(X, Y):\n xgb_model = xgb.XGBClassifier()\n parameters = {'nthread':[4], #when use hyperthread, xgboost may become slower\n\t\t 'objective':['binary:logistic'],\n\t\t 'learning_rate': [0.05], #so called `eta` value\n\t\t 'max_depth': [6],\n\t\t 'min_child_weight': [11],\n\t\t 'silent': [1],\n\t\t 'subsample': [0.8],\n\t\t 'colsample_bytree': [0.7],\n\t\t 'n_estimators': [5], #number of trees, change it to 1000 for better results\n\t\t 'missing':[-999],\n\t\t 'seed': [1337]}\n clf = GridSearchCV(xgb_model, parameters)\n clf.fit(X, Y)\n return clf\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 4:\n print(\"Usage: python extract_region.py <dataset directory> <annotation file> <prefix> \\n\")\n exit()\n positives, negatives = extract_training(sys.argv[1], sys.argv[2])\n" ]
[ [ "sklearn.svm.SVC", "numpy.mean", "sklearn.gaussian_process.GaussianProcessClassifier", "sklearn.preprocessing.MinMaxScaler", "sklearn.decomposition.NMF", "sklearn.model_selection.GridSearchCV", "sklearn.ensemble.RandomForestClassifier", "sklearn.preprocessing.StandardScaler", "sklearn.gaussian_process.kernels.RBF", "sklearn.model_selection.train_test_split", "sklearn.decomposition.PCA" ] ]
michalnand/reinforcement_learning_agents
[ "45f02c23b1135c87311dce5a52f6e643e4313fc3" ]
[ "RLAgents/lib_common/WrapperSuperMario.py" ]
[ "import gym\nimport numpy\nfrom PIL import Image\n\nfrom nes_py.wrappers import JoypadSpace\nfrom gym_super_mario_bros.actions import COMPLEX_MOVEMENT\n\nclass NopOpsEnv(gym.Wrapper):\n def __init__(self, env=None, max_count=30):\n super(NopOpsEnv, self).__init__(env)\n self.max_count = max_count\n\n def reset(self):\n self.env.reset()\n\n noops = numpy.random.randint(1, self.max_count + 1)\n \n for _ in range(noops):\n obs, _, _, _ = self.env.step(0)\n \n return obs\n\n\nclass SkipEnv(gym.Wrapper):\n def __init__(self, env, skip = 4):\n gym.Wrapper.__init__(self, env)\n self._skip = skip\n\n def step(self, action):\n total_reward = 0.0\n done = None\n for i in range(self._skip):\n state, reward, done, info = self.env.step(action)\n total_reward+= reward\n if done:\n break\n\n return state, total_reward, done, info\n\n\nclass ResizeEnv(gym.ObservationWrapper):\n def __init__(self, env, height = 96, width = 96, frame_stacking = 4):\n super(ResizeEnv, self).__init__(env)\n self.height = height\n self.width = width\n self.frame_stacking = frame_stacking\n\n state_shape = (self.frame_stacking, self.height, self.width)\n self.dtype = numpy.float32\n\n self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=state_shape, dtype=self.dtype)\n self.state = numpy.zeros(state_shape, dtype=self.dtype)\n\n def observation(self, state):\n img = Image.fromarray(state)\n img = img.convert('L')\n img = img.resize((self.height, self.width))\n\n for i in reversed(range(self.frame_stacking-1)):\n self.state[i+1] = self.state[i].copy()\n self.state[0] = numpy.array(img).astype(self.dtype)/255.0\n\n return self.state\n\n\n\n\nclass ClipRewardEnv(gym.Wrapper):\n def __init__(self, env, no_rewards = False):\n gym.Wrapper.__init__(self, env)\n\n self.raw_episodes = 0\n self.raw_score = 0.0\n self.raw_score_per_episode = 0.0\n self.raw_score_total = 0.0 \n self.no_rewards = no_rewards\n\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n\n self.raw_score+= reward\n self.raw_score_total+= reward\n\n if done:\n self.raw_episodes+= 1\n k = 0.1\n self.raw_score_per_episode = (1.0 - k)*self.raw_score_per_episode + k*self.raw_score\n self.raw_score = 0.0\n\n reward = reward/15.0\n\n if self.no_rewards:\n reward = 0.0\n\n return obs, reward, done, info\n\n\n\ndef WrapperSuperMario(env, height = 96, width = 96, frame_stacking=4, frame_skipping=4):\n env = JoypadSpace(env, COMPLEX_MOVEMENT)\n \n env = NopOpsEnv(env)\n env = SkipEnv(env, frame_skipping)\n env = ResizeEnv(env, height, width, frame_stacking)\n env = ClipRewardEnv(env, False)\n\n env.reset()\n\n return env\n\ndef WrapperSuperMarioNoRewards(env, height = 96, width = 96, frame_stacking=4, frame_skipping=4):\n env = JoypadSpace(env, COMPLEX_MOVEMENT)\n \n env = NopOpsEnv(env)\n env = SkipEnv(env, frame_skipping)\n env = ResizeEnv(env, height, width, frame_stacking)\n env = ClipRewardEnv(env, True)\n\n env.reset()\n\n return env" ]
[ [ "numpy.array", "numpy.random.randint", "numpy.zeros" ] ]
yuhaoooo/FaceAdv
[ "73e27b7ca01243a9a3d115f5fabd1008b2afb34a" ]
[ "Finetune/cosface_finetune.py" ]
[ "import os\nimport torch\nimport random\nimport numpy as np\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms, datasets\nfrom module.units.cosface_module import CosFace\n\n\ndef accuracy(logits, y):\n _, preds = torch.max(logits, 1)\n return (preds == y).float().mean()\n\n\nif __name__ == \"__main__\":\n\n random.seed(117)\n np.random.seed(117)\n torch.manual_seed(117)\n torch.cuda.manual_seed(117)\n\n transform = transforms.Compose([\n transforms.Resize((112, 96)),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n cosface = CosFace(classnum=156, pretrained=r'..\\Auxiliary\\PretrainedFeatureExtractor\\ACC99.28.pth').to(device)\n\n dataset_dir = r'..\\Auxiliary\\ClippedFaceBank'\n dataset = datasets.ImageFolder(\n dataset_dir, transform=transform)\n len_imgs = int(len(dataset) * 0.2)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [len(dataset) - len_imgs, len_imgs])\n\n batch_size = 32\n workers = 0 if os.name == 'nt' else 8\n epochs = 20\n train_loader = DataLoader(\n train_dataset,\n num_workers=workers,\n batch_size=batch_size,\n shuffle=True\n )\n test_loader = DataLoader(\n test_dataset,\n num_workers=workers,\n batch_size=1,\n shuffle=False\n )\n\n optimizer = optim.Adam(cosface.logits.parameters(), lr=1e-3)\n\n loss_fn = torch.nn.CrossEntropyLoss()\n\n cosface.backbone.eval()\n\n best_acc, best_state_dict = 0., {}\n for epoch in range(epochs):\n print('\\nEpoch {}/{}'.format(epoch + 1, epochs))\n print('-' * 10)\n\n cosface.logits.train()\n loss = 0.0\n acc = 0.0\n for i_batch, (x, y) in enumerate(train_loader):\n x = x.to(device)\n y = y.to(device)\n optimizer.zero_grad()\n y_pred = cosface(x)\n loss_batch = loss_fn(y_pred, y)\n # update\n loss_batch.backward()\n optimizer.step()\n loss += loss_batch.detach().cpu().numpy()\n acc += accuracy(y_pred, y).detach().cpu().numpy()\n loss /= (i_batch + 1)\n acc /= (i_batch + 1)\n print('The train loss is {}, The accuracy is {}'.format(loss, acc))\n\n cosface.logits.eval()\n loss, acc = 0.0, 0.0\n for i_batch, (x, y) in enumerate(test_loader):\n x = x.to(device)\n y = y.to(device)\n y_pred = cosface(x)\n loss_batch = loss_fn(y_pred, y)\n # update\n loss += loss_batch.detach().cpu().numpy()\n acc += accuracy(y_pred, y).detach().cpu().numpy()\n loss /= (i_batch + 1)\n acc /= (i_batch + 1)\n print('The test loss is {}, The accuracy is {}'.format(loss, acc))\n\n if best_acc < acc:\n best_acc = acc\n best_state_dict = cosface.state_dict()\n\n os.makedirs(r'..\\Auxiliary\\PretrainedFaceRecognizer', exist_ok=True)\n torch.save(best_state_dict, r'..\\Auxiliary\\PretrainedFaceRecognizer\\finetuned_cosface.pt')\n" ]
[ [ "torch.utils.data.DataLoader", "torch.cuda.manual_seed", "torch.manual_seed", "torch.save", "numpy.random.seed", "torch.nn.CrossEntropyLoss", "torch.cuda.is_available", "torch.max" ] ]
hybug/RL_Lab
[ "0748e143a0fb60b9912ca28fbebc25e8f97a2fe4" ]
[ "examples/PPO_super_mario_bros/env.py" ]
[ "'''\nAuthor: hanyu\nDate: 2020-11-06 13:04:12\nLastEditTime: 2021-01-09 09:07:08\nLastEditors: hanyu\nDescription: environment\nFilePath: /test_ppo/examples/PPO_super_mario_bros/env.py\n'''\nimport logging\nimport numpy as np\n\nfrom collections import namedtuple\n\n\n# todo, to common\ndef padding(input, seqlen, dtype):\n input = np.array(input, dtype=dtype)\n if len(input) >= seqlen:\n return input\n shape = input.shape\n pad = np.tile(\n np.zeros_like(input[0:1], dtype=dtype),\n [seqlen - shape[0]] + (len(shape) - 1) * [1])\n return np.concatenate([input, pad], axis=0)\n\n\nSeg = namedtuple(\"Seg\", [\"s\", \"a\", \"a_logits\",\n \"r\", \"gaes\", \"v_cur\", \"state_in\"])\n\n\ndef _warp_env():\n import random\n from utils.get_gaes import get_gaes\n import gym_super_mario_bros\n from PIL import Image\n from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT\n from nes_py.wrappers import JoypadSpace\n\n class Env(object):\n \"\"\"\n Raw single environment of game\n \"\"\"\n\n def __init__(self, act_space, act_repeats, frames, state_size, burn_in, seqlen, game):\n '''\n description: init basic params settings.\n param {\n act_space: agent act spaces.\n act_repeats: one a repeats number, default as 1.\n frames: stack of frames for each state.\n state_size: state_size calculated in build_policy_evaluator().\n burn_in: sequences length of each burn-in(dropped) segment.\n seqlen: sequences length of each training segment.\n game: game environment.\n }\n return {None}\n '''\n self.act_space = act_space\n self.act_repeats = act_repeats\n self.act_repeat = random.choice(self.act_repeats)\n self.frames = frames\n self.state_size = state_size\n self.game = game\n self.burn_in = burn_in\n self.seqlen = seqlen\n\n self.max_pos = -10000\n\n self.count = 0\n\n # make gym env from gym_super_mario_bros\n env = gym_super_mario_bros.make(game)\n # warp the raw env through JoypadSpace according act_space\n if self.act_space == 7:\n self.env = JoypadSpace(env, SIMPLE_MOVEMENT)\n elif self.act_space == 12:\n self.env = JoypadSpace(env, COMPLEX_MOVEMENT)\n\n # resize the output image to 84*84 & normalize the pixel\n # input: (240, 256, 3)\n # output: (84, 84, 1)\n s_t = self.resize_image(self.env.reset())\n # expand the state dimension\n # output: (84, 84, frames)\n self.s_t = np.tile(s_t, [1, 1, frames])\n # add the batch_size dimension\n # output: (batch_size, 84, 84, frames)\n self.s = [self.s_t]\n\n # action shape: (batch_size, )\n self.a_t = random.randint(0, act_space - 1)\n self.a = [self.a_t]\n # action logits shape: (batch_size, act_space)\n self.a_logits = []\n self.r = [0]\n self.pos = []\n\n self.v_cur = []\n\n # decides according to build_policy_evaluator()\n state_in = np.zeros(self.state_size, dtype=np.float32)\n # state_in shape: (batch_size, state_in_number)\n self.state_in = [state_in]\n\n self.done = False\n\n def step(self, a, a_logits, v_cur, state_in, force=False):\n '''\n description: step function\n param {\n a: step action\n a_logits: action logits\n v_cur: current value\n state_in: state_in\n force: force flag\n }\n return {\n segs: list of [\"s\", \"a\", \"a_logits\", \"r\", \"gaes\", \"v_cur\", \"state_in\"]\n }\n '''\n # repeat the last action or step the current action\n # according to the act_repeat\n self.count += 1\n if self.count % self.act_repeat == 0:\n self.a_t = a\n self.count = 0\n self.act_repeat = random.choice(self.act_repeats)\n\n # step the action and get the result\n gs_t1, gr_t, gdone, ginfo = self.env.step(self.a_t)\n if not gdone:\n s_t1, 
r_t, done, info = self.env.step(self.a_t)\n r_t += gr_t\n r_t /= 2.\n else:\n s_t1 = gs_t1\n r_t = gr_t\n done = gdone\n info = ginfo\n # reward scaling\n r_t /= 15.\n s_t1 = self.resize_image(s_t1)\n channels = s_t1.shape[-1]\n # concatenate s_t1(the last stacked frame)\n # to self.s_t(drop the first stacked frame)\n self.s_t = np.concatenate(\n [s_t1, self.s_t[:, :, :-channels]], axis=-1)\n\n self.s.append(self.s_t)\n self.a.append(self.a_t)\n self.a_logits.append(a_logits)\n self.r.append(r_t)\n self.max_pos = max(self.max_pos, info[\"x_pos\"])\n self.pos.append(info[\"x_pos\"])\n if (len(self.pos) > 100) and (\n info[\"x_pos\"] - self.pos[-100] < 5) and (\n self.pos[-100] - info[\"x_pos\"] < 5):\n done = True\n self.done = done\n\n self.v_cur.append(v_cur)\n self.state_in.append(state_in)\n\n \"\"\"\n get segs\n \"\"\"\n segs = self.get_history(force)\n\n \"\"\"\n reset env\n \"\"\"\n self.reset(force)\n\n return segs\n\n def reset(self, force=False):\n if self.done or force:\n max_pos = self.max_pos\n self.max_pos = -10000\n print(\" Max Position %s : %d\" % (self.game, max_pos))\n self.count = 0\n self.act_repeat = random.choice(self.act_repeats)\n\n s_t = self.resize_image(self.env.reset())\n\n self.s_t = np.tile(s_t, [1, 1, self.frames])\n self.s = [self.s_t]\n\n self.a_t = random.randint(0, self.act_space - 1)\n self.a = [self.a_t]\n self.a_logits = []\n self.r = [0]\n self.pos = []\n\n self.v_cur = []\n\n state_in = np.zeros(self.state_size, dtype=np.float32)\n self.state_in = [state_in]\n\n self.done = False\n\n def get_state(self):\n return self.s_t\n\n def get_act(self):\n return self.a_t\n\n def get_max_pos(self):\n return self.max_pos\n\n def reset_max_pos(self):\n self.max_pos = -10000\n\n def get_state_in(self):\n return self.state_in[-1]\n\n def get_history(self, force=False):\n if self.done or force:\n if self.done:\n # using Generalized Advantage Estimator estimate Advantage\n gaes, _ = get_gaes(None, self.r, self.v_cur,\n self.v_cur[1:] + [0], 0.99, 0.95)\n seg = Seg(self.s, self.a, self.a_logits, self.r,\n gaes, self.v_cur, self.state_in)\n return self.postprocess(seg)\n if force and len(self.r) > 1:\n gaes, _ = get_gaes(\n None, self.r[:-1], self.v_cur[:-1], self.v_cur[1:], 0.99, 0.95)\n seg = Seg(self.s[:-1], self.a[:-1], self.a_logits[:-1], self.r[:-1], gaes,\n self.v_cur[:-1], self.state_in[:-1])\n return self.postprocess(seg)\n return None\n\n @staticmethod\n def resize_image(image, size=84):\n '''\n description: resize and norm the image\n param {\n image: image of np.array\n size: the size after resize\n }\n return {the image after resize and norm}\n '''\n image = Image.fromarray(image)\n image = image.convert(\"L\")\n image = image.resize((size, size))\n image = np.array(image)\n image = image / 255.\n image = np.array(image, np.float32)\n return image[:, :, None]\n\n def postprocess(self, seg):\n \"\"\"\n postprocess the seg for training,\n split the raw seg into several seqlen segs.\n \"\"\"\n burn_in = self.burn_in\n seqlen = self.seqlen + burn_in\n seg_results = []\n if seg is not None:\n while len(seg[0]) > burn_in:\n next_seg = dict()\n # input: (121(depends on done timing), 84, 84, frames)\n # output: (seqlen, 84, 84, frames)\n next_seg[\"s\"] = padding(seg.s[:seqlen], seqlen, np.float32)\n next_seg[\"a\"] = padding(\n seg.a[1:seqlen + 1], seqlen, np.int32)\n next_seg[\"prev_a\"] = padding(\n seg.a[:seqlen], seqlen, np.int32)\n next_seg[\"a_logits\"] = padding(\n seg.a_logits[:seqlen], seqlen, np.float32)\n next_seg[\"r\"] = padding(\n seg.r[1:seqlen + 
1], seqlen, np.float32)\n next_seg[\"prev_r\"] = padding(\n seg.r[:seqlen], seqlen, np.float32)\n next_seg[\"adv\"] = padding(\n seg.gaes[:seqlen], seqlen, np.float32)\n next_seg[\"v_cur\"] = padding(\n seg.v_cur[:seqlen], seqlen, np.float32)\n next_seg[\"state_in\"] = np.array(\n seg.state_in[0], np.float32)\n next_seg[\"slots\"] = padding(\n len(seg.s[:seqlen]) * [1], seqlen, np.int32)\n\n seg_results.append(next_seg)\n seg = Seg(*[t[burn_in:] for t in seg])\n if any(seg_results):\n # print(\"full use one segs done!\")\n return seg_results\n else:\n return None\n\n class Envs(object):\n def __init__(self, act_space, act_repeats, frames,\n state_size, burn_in, seqlen, games):\n '''\n description: init the environment list \n param {params}\n return {*}\n '''\n self.envs = []\n for game in games:\n env = Env(act_space, act_repeats, frames,\n state_size, burn_in, seqlen, game)\n self.envs.append(env)\n\n def step(self, sess, model):\n '''\n description: step action according to neural network model\n param {\n sess: tensorflow session\n model: the neural network model\n }\n return {the list of Seg}\n '''\n feed_dict = self.get_feed_dict(model)\n\n # get predicted action from model\n a, a_logits, v_cur, state_in = sess.run(\n [model.current_act, model.current_act_logits,\n model.current_value, model.state_out],\n feed_dict=feed_dict\n )\n\n # step the predicted action in turn\n segs = [env.step(\n a[i][0],\n a_logits[i][0],\n v_cur[i][0],\n state_in[i]\n ) for (i, env) in enumerate(self.envs)]\n\n segs = [t2 for t1 in segs if t1 is not None for t2 in t1]\n\n return segs\n\n def get_feed_dict(self, model):\n '''\n description: get the feed_dict of model\n param {*}\n return {*}\n '''\n feed_dict = dict()\n feed_dict[model.s_t] = [[env.get_state()] for env in self.envs]\n feed_dict[model.previous_actions] = [[env.get_act()]\n for env in self.envs]\n feed_dict[model.prev_r] = [[env.r[-1]] for env in self.envs]\n feed_dict[model.state_in] = [env.get_state_in()\n for env in self.envs]\n return feed_dict\n\n return Envs\n\n\ndef build_env(kwargs):\n Envs = _warp_env()\n state_size = kwargs['state_size']\n action_repeats = kwargs['action_repeats']\n frames = kwargs[\"frames\"]\n parallel = kwargs['parallel']\n act_space = kwargs['act_space']\n burn_in = kwargs['burn_in']\n seqlen = kwargs['seqlen']\n\n games = [\"SuperMarioBros-%d-%d-v0\" %\n (i, j) for i in range(1, 9) for j in range(1, 5)]\n games = games * (parallel // len(games))\n\n envs = Envs(act_space, action_repeats, frames,\n state_size, burn_in, seqlen, games)\n\n return envs\n" ]
[ [ "numpy.zeros_like", "numpy.tile", "numpy.zeros", "numpy.array", "numpy.concatenate" ] ]
monkeypants/CartridgeOCR
[ "a2cdaa72e3839a881118b85f5ff7b4515579004b" ]
[ "src/model/dataProcessing/coco_utils.py" ]
[ "import copy\nimport os\nimport torch\nimport torch.utils.data\nimport torchvision\nfrom pycocotools import mask as coco_mask\nfrom pycocotools.coco import COCO\nimport dataProcessing.transforms as T\nimport logging\n\n\nclass FilterAndRemapCocoCategories(object):\n def __init__(self, categories, remap=True):\n self.categories = categories\n self.remap = remap\n\n def __call__(self, image, target):\n anno = target[\"annotations\"]\n anno = [obj for obj in anno if obj[\"category_id\"] in self.categories]\n if not self.remap:\n target[\"annotations\"] = anno\n return image, target\n anno = copy.deepcopy(anno)\n for obj in anno:\n obj[\"category_id\"] = self.categories.index(obj[\"category_id\"])\n target[\"annotations\"] = anno\n return image, target\n\n\ndef convert_polygons(polygons, height, width):\n max_width = 1080\n if width > max_width:\n logging.warn('invalid width needs normalizing')\n polyout = []\n for p in polygons:\n mult = [width, height] * (len(p) // 2)\n assert(len(mult) == len(p))\n polyout.append([x * y for x, y in zip(p, mult)])\n return polyout\n\n\ndef transform_coco_polygon(segmentations, height, width):\n result = []\n for polygons in segmentations:\n # print('polygons: ',polygons)\n polyout = convert_polygons(polygons, height, width) \n result.append(polyout)\n return result\n\n\ndef convert_coco_poly_to_mask(segmentations, height, width):\n masks = []\n for polygons in segmentations:\n # print('polygons: ',polygons)\n polygons = convert_polygons(polygons, height, width)\n # print('poly2', polygons)\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8)\n mask = mask.any(dim=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, dim=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8)\n return masks\n\n\ndef transform_coco_annotation(anno, height, width):\n anno['segmentation'] = convert_polygons(anno['segmentation'], height, width)\n anno['bbox'] = [x * y for (x, y) in zip(anno['bbox'], [width, height, width, height])]\n for i in range(2, len(anno['bbox'])):\n anno['bbox'][i] += anno['bbox'][i - 2]\n\n\nclass ConvertCocoPolysToMask(object):\n def __call__(self, image, target):\n w, h = image.size\n # print(w,h)\n\n image_id = target[\"image_id\"]\n image_id = torch.tensor([image_id])\n\n anno = target[\"annotations\"]\n \n # TODO: now fixed in the conversion script.\n # for obj in anno:\n # obj['iscrowd']=0\n\n anno = [obj for obj in anno if obj['iscrowd'] == 0]\n\n boxes = [obj[\"bbox\"] for obj in anno]\n # guard against no boxes via resizing\n boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)\n boxes[:, 2:] += boxes[:, :2]\n boxes *= torch.as_tensor([w, h, w, h])\n boxes[:, 0::2].clamp_(min=0, max=w)\n boxes[:, 1::2].clamp_(min=0, max=h)\n\n classes = [obj[\"category_id\"] for obj in anno]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n segmentations = [obj[\"segmentation\"] for obj in anno]\n masks = convert_coco_poly_to_mask(segmentations, h, w)\n\n keypoints = None\n if anno and \"keypoints\" in anno[0]:\n keypoints = [obj[\"keypoints\"] for obj in anno]\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32)\n num_keypoints = keypoints.shape[0]\n if num_keypoints:\n keypoints = keypoints.view(num_keypoints, -1, 3)\n\n keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) \n boxes = boxes[keep]\n classes = classes[keep]\n masks = masks[keep]\n if keypoints 
is not None:\n keypoints = keypoints[keep]\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = classes\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n if keypoints is not None:\n target[\"keypoints\"] = keypoints\n\n # for conversion to coco api\n area = torch.tensor([obj[\"area\"] for obj in anno])\n iscrowd = torch.tensor([obj[\"iscrowd\"] for obj in anno])\n # iscrowd = torch.tensor([0 for obj in anno])\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n return image, target\n\n\ndef _coco_remove_images_without_annotations(dataset, cat_list=None):\n def _has_only_empty_bbox(anno):\n return all(any(o <= 1 for o in obj[\"bbox\"][2:]) for obj in anno)\n\n def _count_visible_keypoints(anno):\n return sum(sum(1 for v in ann[\"keypoints\"][2::3] if v > 0) for ann in anno)\n\n min_keypoints_per_image = 10\n\n def _has_valid_annotation(anno):\n # if it's empty, there is no annotation\n if len(anno) == 0:\n return False\n # if all boxes have close to zero area, there is no annotation\n if _has_only_empty_bbox(anno):\n return False\n # keypoints task have a slight different critera for considering\n # if an annotation is valid\n if \"keypoints\" not in anno[0]:\n return True\n # for keypoint detection tasks, only consider valid images those\n # containing at least min_keypoints_per_image\n if _count_visible_keypoints(anno) >= min_keypoints_per_image:\n return True\n return False\n\n assert isinstance(dataset, torchvision.datasets.CocoDetection)\n ids = []\n for ds_idx, img_id in enumerate(dataset.ids):\n ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = dataset.coco.loadAnns(ann_ids)\n if cat_list:\n anno = [obj for obj in anno if obj[\"category_id\"] in cat_list]\n if _has_valid_annotation(anno):\n ids.append(ds_idx)\n\n dataset = torch.utils.data.Subset(dataset, ids)\n return dataset\n\n\ndef convert_to_coco_api(ds):\n coco_ds = COCO()\n ann_id = 0\n dataset = {'images': [], 'categories': [], 'annotations': []}\n categories = set()\n for img_idx in range(len(ds)):\n # find better way to get target\n # targets = ds.get_annotations(img_idx)\n img, targets = ds[img_idx]\n image_id = targets[\"image_id\"].item()\n img_dict = {}\n img_dict['id'] = image_id\n img_dict['height'] = img.shape[-2]\n img_dict['width'] = img.shape[-1]\n img_dict['image'] = img\n dataset['images'].append(img_dict)\n bboxes = targets[\"boxes\"]\n bboxes[:, 2:] -= bboxes[:, :2]\n bboxes = bboxes.tolist()\n labels = targets['labels'].tolist()\n areas = targets['area'].tolist()\n iscrowd = targets['iscrowd'].tolist()\n if 'masks' in targets:\n masks = targets['masks']\n # make masks Fortran contiguous for coco_mask\n masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)\n if 'keypoints' in targets:\n keypoints = targets['keypoints']\n keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()\n num_objs = len(bboxes)\n for i in range(num_objs):\n ann = {}\n ann['image_id'] = image_id\n ann['bbox'] = bboxes[i]\n ann['category_id'] = labels[i]\n categories.add(labels[i])\n ann['area'] = areas[i]\n ann['iscrowd'] = iscrowd[i]\n ann['id'] = ann_id\n if 'masks' in targets:\n ann[\"segmentation\"] = coco_mask.encode(masks[i].numpy())\n if 'keypoints' in targets:\n ann['keypoints'] = keypoints[i]\n ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])\n dataset['annotations'].append(ann)\n ann_id += 1\n dataset['categories'] = [{'id': i} for i in sorted(categories)]\n coco_ds.dataset = dataset\n coco_ds.createIndex()\n return coco_ds\n\n\ndef 
get_coco_api_from_dataset(dataset):\n for i in range(10):\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n break\n if isinstance(dataset, torch.utils.data.Subset):\n dataset = dataset.dataset\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n return dataset.coco\n return convert_to_coco_api(dataset)\n\n\nclass CocoDetection(torchvision.datasets.CocoDetection):\n def __init__(self, img_folder, ann_file, transforms):\n super(CocoDetection, self).__init__(img_folder, ann_file)\n self._transforms = transforms\n\n def __getitem__(self, idx):\n img, target = super(CocoDetection, self).__getitem__(idx)\n image_id = self.ids[idx]\n # print(image_id)\n target = dict(image_id=image_id, annotations=target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n return img, target\n\n @staticmethod\n def get_coco_api(dataset, transform=False): \n for i in range(10):\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n break\n if isinstance(dataset, torch.utils.data.Subset):\n dataset = dataset.dataset\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n if not transform:\n return dataset.coco\n else:\n return dataset.transform_coco_api()\n raise Exception(\"No instance of CocoDetection found\")\n\n def transform_coco_api(self):\n coco = copy.deepcopy(self.coco)\n\n image_sizes = {}\n for img, target in self:\n image_sizes[target['image_id'].item()] = img.size()[1:] # TODO: width vs height. Always len 3?\n\n for img in coco.dataset['images']:\n (h, w) = image_sizes[img['id']]\n img['width'] = w\n img['height'] = h\n\n for ann in coco.dataset['annotations']:\n id = ann['image_id']\n (h, w) = image_sizes[id]\n transform_coco_annotation(ann, h, w)\n\n coco.createIndex()\n return coco\n\n\ndef get_coco(root, image_set, transforms, mode='instances'):\n anno_file_template = \"{}_{}2017.json\"\n PATHS = {\n \"train\": (\"train2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"train\"))),\n \"val\": (\"val2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"val\"))),\n # \"train\": (\"val2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"val\")))\n }\n\n t = [ConvertCocoPolysToMask()]\n\n if transforms is not None:\n t.append(transforms)\n transforms = T.Compose(t)\n\n img_folder, ann_file = PATHS[image_set]\n img_folder = os.path.join(root, img_folder)\n ann_file = os.path.join(root, ann_file)\n\n dataset = CocoDetection(img_folder, ann_file, transforms=transforms)\n\n if image_set == \"train\":\n dataset = _coco_remove_images_without_annotations(dataset)\n\n # dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])\n\n return dataset\n\n\ndef get_coco_kp(root, image_set, transforms):\n return get_coco(root, image_set, transforms, mode=\"person_keypoints\")\n" ]
[ [ "torch.stack", "torch.utils.data.Subset", "torch.as_tensor", "torch.tensor", "torch.zeros" ] ]
crisely09/pyscf
[ "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6", "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6", "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6" ]
[ "pyscf/fci/selected_ci.py", "pyscf/pbc/dft/numint.py", "pyscf/dft/test/test_xcfun.py" ]
[ "#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nSelected CI\n\nSimple usage::\n\n >>> from pyscf import gto, scf, ao2mo, fci\n >>> mol = gto.M(atom='C 0 0 0; C 0 0 1')\n >>> mf = scf.RHF(mol).run()\n >>> h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)\n >>> h2 = ao2mo.kernel(mol, mf.mo_coeff)\n >>> e = fci.selected_ci.kernel(h1, h2, mf.mo_coeff.shape[1], mol.nelectron)[0]\n'''\n\nimport ctypes\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf import ao2mo\nfrom pyscf.fci import cistring\nfrom pyscf.fci import direct_spin1\nfrom pyscf.fci import rdm\nfrom pyscf import __config__\n\nlibfci = lib.load_library('libfci')\n\ndef contract_2e(eri, civec_strs, norb, nelec, link_index=None):\n ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)\n if link_index is None:\n link_index = _all_linkstr_index(ci_strs, norb, nelec)\n cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index\n na, nlinka = cd_indexa.shape[:2]\n nb, nlinkb = cd_indexb.shape[:2]\n\n eri = ao2mo.restore(1, eri, norb)\n eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)\n idx,idy = numpy.tril_indices(norb, -1)\n idx = idx * norb + idy\n eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2\n fcivec = ci_coeff.reshape(na,nb)\n # (bb|bb)\n if nelec[1] > 1:\n mb, mlinkb = dd_indexb.shape[:2]\n fcivecT = lib.transpose(fcivec)\n ci1T = numpy.zeros((nb,na))\n libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),\n fcivecT.ctypes.data_as(ctypes.c_void_p),\n ci1T.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(nb), ctypes.c_int(na),\n ctypes.c_int(mb), ctypes.c_int(mlinkb),\n dd_indexb.ctypes.data_as(ctypes.c_void_p))\n ci1 = lib.transpose(ci1T, out=fcivecT)\n else:\n ci1 = numpy.zeros_like(fcivec)\n # (aa|aa)\n if nelec[0] > 1:\n ma, mlinka = dd_indexa.shape[:2]\n libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n ci1.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(ma), ctypes.c_int(mlinka),\n dd_indexa.ctypes.data_as(ctypes.c_void_p))\n\n h_ps = numpy.einsum('pqqs->ps', eri)\n eri1 = eri * 2\n for k in range(norb):\n eri1[:,:,k,k] += h_ps/nelec[0]\n eri1[k,k,:,:] += h_ps/nelec[1]\n eri1 = ao2mo.restore(4, eri1, norb)\n # (bb|aa)\n libfci.SCIcontract_2e_bbaa(eri1.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n ci1.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(nlinka), ctypes.c_int(nlinkb),\n cd_indexa.ctypes.data_as(ctypes.c_void_p),\n cd_indexb.ctypes.data_as(ctypes.c_void_p))\n\n return _as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)\n\ndef select_strs(myci, eri, eri_pq_max, civec_max, strs, norb, nelec):\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nstrs = len(strs)\n nvir = norb - nelec\n 
strs_add = numpy.empty((nstrs*(nelec*nvir)**2//4), dtype=numpy.int64)\n libfci.SCIselect_strs.restype = ctypes.c_int\n nadd = libfci.SCIselect_strs(strs_add.ctypes.data_as(ctypes.c_void_p),\n strs.ctypes.data_as(ctypes.c_void_p),\n eri.ctypes.data_as(ctypes.c_void_p),\n eri_pq_max.ctypes.data_as(ctypes.c_void_p),\n civec_max.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_double(myci.select_cutoff),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs))\n strs_add = sorted(set(strs_add[:nadd]) - set(strs))\n return numpy.asarray(strs_add, dtype=numpy.int64)\n\ndef enlarge_space(myci, civec_strs, eri, norb, nelec):\n if isinstance(civec_strs, (tuple, list)):\n nelec, (strsa, strsb) = _unpack(civec_strs[0], nelec)[1:]\n ci_coeff = lib.asarray(civec_strs)\n else:\n ci_coeff, nelec, (strsa, strsb) = _unpack(civec_strs, nelec)\n na = len(strsa)\n nb = len(strsb)\n ci0 = ci_coeff.reshape(-1,na,nb)\n civec_a_max = lib.norm(ci0, axis=2).max(axis=0)\n civec_b_max = lib.norm(ci0, axis=1).max(axis=0)\n ci_aidx = numpy.where(civec_a_max > myci.ci_coeff_cutoff)[0]\n ci_bidx = numpy.where(civec_b_max > myci.ci_coeff_cutoff)[0]\n civec_a_max = civec_a_max[ci_aidx]\n civec_b_max = civec_b_max[ci_bidx]\n strsa = strsa[ci_aidx]\n strsb = strsb[ci_bidx]\n\n eri = ao2mo.restore(1, eri, norb)\n eri_pq_max = abs(eri.reshape(norb**2,-1)).max(axis=1).reshape(norb,norb)\n\n strsa_add = select_strs(myci, eri, eri_pq_max, civec_a_max, strsa, norb, nelec[0])\n strsb_add = select_strs(myci, eri, eri_pq_max, civec_b_max, strsb, norb, nelec[1])\n strsa = numpy.append(strsa, strsa_add)\n strsb = numpy.append(strsb, strsb_add)\n aidx = numpy.argsort(strsa)\n bidx = numpy.argsort(strsb)\n ci_strs = (strsa[aidx], strsb[bidx])\n aidx = numpy.where(aidx < len(ci_aidx))[0]\n bidx = numpy.where(bidx < len(ci_bidx))[0]\n ma = len(strsa)\n mb = len(strsb)\n\n cs = []\n for i in range(ci0.shape[0]):\n ci1 = numpy.zeros((ma,mb))\n tmp = lib.take_2d(ci0[i], ci_aidx, ci_bidx)\n lib.takebak_2d(ci1, tmp, aidx, bidx)\n cs.append(_as_SCIvector(ci1, ci_strs))\n\n if not isinstance(civec_strs, (tuple, list)) and civec_strs.ndim < 3:\n cs = cs[0]\n return cs\n\ndef cre_des_linkstr(strs, norb, nelec, tril=False):\n '''Given intermediates, the link table to generate input strs\n '''\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nvir = norb - nelec\n nstrs = len(strs)\n link_index = numpy.zeros((nstrs,nelec+nelec*nvir,4), dtype=numpy.int32)\n libfci.SCIcre_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nstrs),\n ctypes.c_int(nelec),\n strs.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(tril))\n return link_index\n\ndef cre_des_linkstr_tril(strs, norb, nelec):\n '''Given intermediates, the link table to generate input strs\n '''\n return cre_des_linkstr(strs, norb, nelec, True)\n\ndef des_des_linkstr(strs, norb, nelec, tril=False):\n '''Given intermediates, the link table to generate input strs\n '''\n if nelec < 2:\n return None\n\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nvir = norb - nelec\n nstrs = len(strs)\n inter1 = numpy.empty((nstrs*nelec), dtype=numpy.int64)\n libfci.SCIdes_uniq_strs.restype = ctypes.c_int\n ninter = libfci.SCIdes_uniq_strs(inter1.ctypes.data_as(ctypes.c_void_p),\n strs.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs))\n inter1 = numpy.asarray(sorted(set(inter1[:ninter])), dtype=numpy.int64)\n ninter = len(inter1)\n\n inter = numpy.empty((ninter*nelec), dtype=numpy.int64)\n ninter = 
libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),\n inter1.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec-1),\n ctypes.c_int(ninter))\n inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)\n ninter = len(inter)\n\n nvir += 2\n link_index = numpy.zeros((ninter,nvir*nvir,4), dtype=numpy.int32)\n libfci.SCIdes_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs), ctypes.c_int(ninter),\n strs.ctypes.data_as(ctypes.c_void_p),\n inter.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(tril))\n return link_index\n\ndef des_des_linkstr_tril(strs, norb, nelec):\n '''Given intermediates, the link table to generate input strs\n '''\n return des_des_linkstr(strs, norb, nelec, True)\n\ndef gen_des_linkstr(strs, norb, nelec):\n '''Given intermediates, the link table to generate input strs\n '''\n if nelec < 1:\n return None\n\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nvir = norb - nelec\n nstrs = len(strs)\n inter = numpy.empty((nstrs*nelec), dtype=numpy.int64)\n libfci.SCIdes_uniq_strs.restype = ctypes.c_int\n ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),\n strs.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs))\n inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)\n ninter = len(inter)\n\n nvir += 1\n link_index = numpy.zeros((ninter,nvir,4), dtype=numpy.int32)\n libfci.SCIdes_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs), ctypes.c_int(ninter),\n strs.ctypes.data_as(ctypes.c_void_p),\n inter.ctypes.data_as(ctypes.c_void_p))\n return link_index\n\ndef gen_cre_linkstr(strs, norb, nelec):\n '''Given intermediates, the link table to generate input strs\n '''\n if nelec == norb:\n return None\n\n strs = numpy.asarray(strs, dtype=numpy.int64)\n nvir = norb - nelec\n nstrs = len(strs)\n inter = numpy.empty((nstrs*nvir), dtype=numpy.int64)\n libfci.SCIcre_uniq_strs.restype = ctypes.c_int\n ninter = libfci.SCIcre_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),\n strs.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs))\n inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)\n ninter = len(inter)\n\n link_index = numpy.zeros((ninter,nelec+1,4), dtype=numpy.int32)\n libfci.SCIcre_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_int(nstrs), ctypes.c_int(ninter),\n strs.ctypes.data_as(ctypes.c_void_p),\n inter.ctypes.data_as(ctypes.c_void_p))\n return link_index\n\n\ndef make_hdiag(h1e, eri, ci_strs, norb, nelec):\n ci_coeff, nelec, ci_strs = _unpack(None, nelec, ci_strs)\n na = len(ci_strs[0])\n nb = len(ci_strs[1])\n hdiag = numpy.empty(na*nb)\n\n h1e = numpy.asarray(h1e, order='C')\n eri = ao2mo.restore(1, eri, norb)\n jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')\n kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')\n c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)\n c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)\n c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)\n occslsta = cistring._strs2occslst(ci_strs[0], norb)\n occslstb = cistring._strs2occslst(ci_strs[1], norb)\n libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),\n c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(nelec[0]), 
ctypes.c_int(nelec[1]),\n occslsta.ctypes.data_as(ctypes.c_void_p),\n occslstb.ctypes.data_as(ctypes.c_void_p))\n return hdiag\n\ndef kernel_fixed_space(myci, h1e, eri, norb, nelec, ci_strs, ci0=None,\n tol=None, lindep=None, max_cycle=None, max_space=None,\n nroots=None, davidson_only=None,\n max_memory=None, verbose=None, ecore=0, **kwargs):\n log = logger.new_logger(myci, verbose)\n if tol is None: tol = myci.conv_tol\n if lindep is None: lindep = myci.lindep\n if max_cycle is None: max_cycle = myci.max_cycle\n if max_space is None: max_space = myci.max_space\n if max_memory is None: max_memory = myci.max_memory\n if nroots is None: nroots = myci.nroots\n if myci.verbose >= logger.WARN:\n myci.check_sanity()\n\n nelec = direct_spin1._unpack_nelec(nelec, myci.spin)\n ci0, nelec, ci_strs = _unpack(ci0, nelec, ci_strs)\n na = len(ci_strs[0])\n nb = len(ci_strs[1])\n h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)\n h2e = ao2mo.restore(1, h2e, norb)\n\n link_index = _all_linkstr_index(ci_strs, norb, nelec)\n hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)\n\n if isinstance(ci0, _SCIvector):\n if ci0.size == na*nb:\n ci0 = [ci0.ravel()]\n else:\n ci0 = [x.ravel() for x in ci0]\n else:\n ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)\n\n def hop(c):\n hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)\n return hc.reshape(-1)\n precond = lambda x, e, *args: x/(hdiag-e+1e-4)\n\n #e, c = lib.davidson(hop, ci0, precond, tol=myci.conv_tol)\n e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,\n max_cycle=max_cycle, max_space=max_space, nroots=nroots,\n max_memory=max_memory, verbose=log, **kwargs)\n if nroots > 1:\n return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]\n else:\n return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)\n\n\ndef kernel_float_space(myci, h1e, eri, norb, nelec, ci0=None,\n tol=None, lindep=None, max_cycle=None, max_space=None,\n nroots=None, davidson_only=None,\n max_memory=None, verbose=None, ecore=0, **kwargs):\n log = logger.new_logger(myci, verbose)\n if tol is None: tol = myci.conv_tol\n if lindep is None: lindep = myci.lindep\n if max_cycle is None: max_cycle = myci.max_cycle\n if max_space is None: max_space = myci.max_space\n if max_memory is None: max_memory = myci.max_memory\n if nroots is None: nroots = myci.nroots\n if myci.verbose >= logger.WARN:\n myci.check_sanity()\n\n nelec = direct_spin1._unpack_nelec(nelec, myci.spin)\n h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)\n h2e = ao2mo.restore(1, h2e, norb)\n\n# TODO: initial guess from CISD\n if isinstance(ci0, _SCIvector):\n if ci0.size == len(ci0._strs[0])*len(ci0._strs[1]):\n ci0 = [ci0.ravel()]\n else:\n ci0 = [x.ravel() for x in ci0]\n else:\n ci_strs = (numpy.asarray([int('1'*nelec[0], 2)]),\n numpy.asarray([int('1'*nelec[1], 2)]))\n ci0 = _as_SCIvector(numpy.ones((1,1)), ci_strs)\n ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)\n if ci0.size < nroots:\n log.warn('''\n Selected-CI space generated from HF ground state (by double exciting) is not enough for excited states.\n HOMO->LUMO excitations are included in the initial guess.\n NOTE: This may introduce excited states of different symmetry.\\n''')\n corea = '1' * (nelec[0]-1)\n coreb = '1' * (nelec[1]-1)\n ci_strs = (numpy.asarray([int('1'+corea, 2), int('10'+corea, 2)]),\n numpy.asarray([int('1'+coreb, 2), int('10'+coreb, 2)]))\n ci0 = _as_SCIvector(numpy.ones((2,2)), ci_strs)\n ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)\n if ci0.size < 
nroots:\n raise RuntimeError('Not enough selected-CI space for %d states' % nroots)\n ci_strs = ci0._strs\n hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)\n ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)\n\n def hop(c):\n hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)\n return hc.ravel()\n precond = lambda x, e, *args: x/(hdiag-e+myci.level_shift)\n\n namax = cistring.num_strings(norb, nelec[0])\n nbmax = cistring.num_strings(norb, nelec[1])\n e_last = 0\n float_tol = myci.start_tol\n tol_decay_rate = myci.tol_decay_rate\n conv = False\n for icycle in range(norb):\n ci_strs = ci0[0]._strs\n float_tol = max(float_tol*tol_decay_rate, tol*1e2)\n log.debug('cycle %d ci.shape %s float_tol %g',\n icycle, (len(ci_strs[0]), len(ci_strs[1])), float_tol)\n\n ci0 = [c.ravel() for c in ci0]\n link_index = _all_linkstr_index(ci_strs, norb, nelec)\n hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)\n #e, ci0 = lib.davidson(hop, ci0.reshape(-1), precond, tol=float_tol)\n e, ci0 = myci.eig(hop, ci0, precond, tol=float_tol, lindep=lindep,\n max_cycle=max_cycle, max_space=max_space, nroots=nroots,\n max_memory=max_memory, verbose=log, **kwargs)\n if nroots > 1:\n ci0 = [_as_SCIvector(c, ci_strs) for c in ci0]\n de, e_last = min(e)-e_last, min(e)\n log.info('cycle %d E = %s dE = %.8g', icycle, e+ecore, de)\n else:\n ci0 = [_as_SCIvector(ci0, ci_strs)]\n de, e_last = e-e_last, e\n log.info('cycle %d E = %.15g dE = %.8g', icycle, e+ecore, de)\n\n if ci0[0].shape == (namax,nbmax) or abs(de) < tol*1e3:\n conv = True\n break\n\n last_ci0_size = float(len(ci_strs[0])), float(len(ci_strs[1]))\n ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)\n na = len(ci0[0]._strs[0])\n nb = len(ci0[0]._strs[1])\n if ((.99 < na/last_ci0_size[0] < 1.01) and\n (.99 < nb/last_ci0_size[1] < 1.01)):\n conv = True\n break\n\n ci_strs = ci0[0]._strs\n log.debug('Extra CI in selected space %s', (len(ci_strs[0]), len(ci_strs[1])))\n ci0 = [c.ravel() for c in ci0]\n link_index = _all_linkstr_index(ci_strs, norb, nelec)\n hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)\n e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,\n max_cycle=max_cycle, max_space=max_space, nroots=nroots,\n max_memory=max_memory, verbose=log, **kwargs)\n\n na = len(ci_strs[0])\n nb = len(ci_strs[1])\n if nroots > 1:\n for i, ei in enumerate(e+ecore):\n log.info('Selected CI state %d E = %.15g', i, ei)\n return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]\n else:\n log.info('Selected CI E = %.15g', e+ecore)\n return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)\n\ndef kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,\n lindep=1e-14, max_cycle=50, max_space=12, nroots=1,\n davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,\n select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):\n return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,\n level_shift, tol, lindep, max_cycle,\n max_space, nroots, davidson_only,\n pspace_size, select_cutoff=select_cutoff,\n ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,\n **kwargs)\n\ndef make_rdm1s(civec_strs, norb, nelec, link_index=None):\n '''Spin separated 1-particle density matrices.\n The return values include two density matrices: (alpha,alpha), (beta,beta)\n\n dm1[p,q] = <q^\\dagger p>\n\n The convention is based on McWeeney's book, Eq (5.4.20).\n The contraction between 1-particle Hamiltonian and rdm1 is\n E = einsum('pq,qp', h1, rdm1)\n '''\n ci_coeff, nelec, ci_strs = 
_unpack(civec_strs, nelec)\n if link_index is None:\n cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])\n cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])\n else:\n cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index\n rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', ci_coeff, ci_coeff,\n norb, nelec, (cd_indexa,cd_indexb))\n rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', ci_coeff, ci_coeff,\n norb, nelec, (cd_indexa,cd_indexb))\n return rdm1a, rdm1b\n\ndef make_rdm1(civec_strs, norb, nelec, link_index=None):\n r'''Spin-traced 1-particle density matrix.\n\n dm1[p,q] = <q_alpha^\\dagger p_alpha> + <q_beta^\\dagger p_beta>\n\n The convention is based on McWeeney's book, Eq (5.4.20)\n The contraction between 1-particle Hamiltonian and rdm1 is\n E = einsum('pq,qp', h1, rdm1)\n '''\n rdm1a, rdm1b = make_rdm1s(civec_strs, norb, nelec, link_index)\n return rdm1a + rdm1b\n\n# dm[p,q,r,s] = <|p^+ q r^+ s|>\ndef make_rdm2s(civec_strs, norb, nelec, link_index=None, **kwargs):\n r'''Spin separated 2-particle density matrices.\n The return values include three density matrices:\n (alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta), (beta,beta,beta,beta)\n\n 2pdm[p,q,r,s] = :math:`\\langle p^\\dagger r^\\dagger s q\\rangle`\n '''\n ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)\n if link_index is None:\n cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])\n dd_indexa = des_des_linkstr(ci_strs[0], norb, nelec[0])\n cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])\n dd_indexb = des_des_linkstr(ci_strs[1], norb, nelec[1])\n else:\n cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index\n na, nlinka = cd_indexa.shape[:2]\n nb, nlinkb = cd_indexb.shape[:2]\n\n fcivec = ci_coeff.reshape(na,nb)\n # (bb|aa) and (aa|bb)\n dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,\n norb, nelec, (cd_indexa,cd_indexb), 0)[1]\n # (aa|aa)\n dm2aa = numpy.zeros([norb]*4)\n if nelec[0] > 1:\n ma, mlinka = dd_indexa.shape[:2]\n libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,\n dm2aa.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n fcivec.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(na), ctypes.c_int(nb),\n ctypes.c_int(ma), ctypes.c_int(mlinka),\n dd_indexa.ctypes.data_as(ctypes.c_void_p))\n # (bb|bb)\n dm2bb = numpy.zeros([norb]*4)\n if nelec[1] > 1:\n mb, mlinkb = dd_indexb.shape[:2]\n fcivecT = lib.transpose(fcivec)\n libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,\n dm2bb.ctypes.data_as(ctypes.c_void_p),\n fcivecT.ctypes.data_as(ctypes.c_void_p),\n fcivecT.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb),\n ctypes.c_int(nb), ctypes.c_int(na),\n ctypes.c_int(mb), ctypes.c_int(mlinkb),\n dd_indexb.ctypes.data_as(ctypes.c_void_p))\n return dm2aa, dm2ab, dm2bb\n\ndef make_rdm2(civec_strs, norb, nelec, link_index=None, **kwargs):\n r'''Spin-traced two-particle density matrix.\n\n 2pdm[p,q,r,s] = :math:`\\langle p_\\alpha^\\dagger r_\\alpha^\\dagger s_\\alpha q_\\alpha\\rangle +\n \\langle p_\\beta^\\dagger r_\\alpha^\\dagger s_\\alpha q_\\beta\\rangle +\n \\langle p_\\alpha^\\dagger r_\\beta^\\dagger s_\\beta q_\\alpha\\rangle +\n \\langle p_\\beta^\\dagger r_\\beta^\\dagger s_\\beta q_\\beta\\rangle`.\n '''\n dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)\n dm2aa += dm2bb\n dm2aa += dm2ab\n dm2aa += dm2ab.transpose(2,3,0,1)\n return dm2aa\n\ndef trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index=None):\n r'''Spin separated transition 1-particle density matrices.\n See also function 
:func:`make_rdm1s`\n\n 1pdm[p,q] = :math:`\\langle q^\\dagger p \\rangle`\n '''\n cibra, nelec, ci_strs = _unpack(cibra_strs, nelec)\n ciket, nelec1, ci_strs1 = _unpack(ciket_strs, nelec)\n assert(all(ci_strs[0] == ci_strs1[0]) and\n all(ci_strs[1] == ci_strs1[1]))\n if link_index is None:\n cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])\n cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])\n else:\n cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index\n rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,\n norb, nelec, (cd_indexa,cd_indexb))\n rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,\n norb, nelec, (cd_indexa,cd_indexb))\n return rdm1a, rdm1b\n\ndef trans_rdm1(cibra_strs, ciket_strs, norb, nelec, link_index=None):\n r'''Spin traced transition 1-particle density matrices.\n See also function :func:`make_rdm1`\n\n 1pdm[p,q] = :math:`\\langle q_\\alpha^\\dagger p_\\alpha \\rangle\n + \\langle q_\\beta^\\dagger p_\\beta \\rangle`\n '''\n rdm1a, rdm1b = trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index)\n return rdm1a + rdm1b\n\ndef spin_square(civec_strs, norb, nelec):\n '''Spin square for RHF-FCI CI wfn only (obtained from spin-degenerated\n Hamiltonian)'''\n ci1 = contract_ss(civec_strs, norb, nelec)\n\n ss = numpy.einsum('ij,ij->', civec_strs.reshape(ci1.shape), ci1)\n s = numpy.sqrt(ss+.25) - .5\n multip = s*2+1\n return ss, multip\n\ndef contract_ss(civec_strs, norb, nelec):\n r''' S^2 |\\Psi\\rangle\n '''\n ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)\n strsa, strsb = ci_strs\n neleca, nelecb = nelec\n ci_coeff = ci_coeff.reshape(len(strsa),len(strsb))\n\n def gen_map(fstr_index, strs, nelec, des=True):\n a_index = fstr_index(strs, norb, nelec)\n amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)\n if des:\n for k, tab in enumerate(a_index):\n sign = tab[:,3]\n tab = tab[sign!=0]\n amap[k,tab[:,1]] = tab[:,2:]\n else:\n for k, tab in enumerate(a_index):\n sign = tab[:,3]\n tab = tab[sign!=0]\n amap[k,tab[:,0]] = tab[:,2:]\n return amap\n\n if neleca > 0:\n ades = gen_map(gen_des_linkstr, strsa, neleca)\n else:\n ades = None\n\n if nelecb > 0:\n bdes = gen_map(gen_des_linkstr, strsb, nelecb)\n else:\n bdes = None\n\n if neleca < norb:\n acre = gen_map(gen_cre_linkstr, strsa, neleca, False)\n else:\n acre = None\n\n if nelecb < norb:\n bcre = gen_map(gen_cre_linkstr, strsb, nelecb, False)\n else:\n bcre = None\n\n def trans(ci1, aindex, bindex):\n if aindex is None or bindex is None:\n return None\n\n ma = len(aindex)\n mb = len(bindex)\n t1 = numpy.zeros((ma,mb))\n for i in range(norb):\n signa = aindex[:,i,1]\n signb = bindex[:,i,1]\n maska = numpy.where(signa!=0)[0]\n maskb = numpy.where(signb!=0)[0]\n addra = aindex[maska,i,0]\n addrb = bindex[maskb,i,0]\n citmp = lib.take_2d(ci_coeff, addra, addrb)\n citmp *= signa[maska].reshape(-1,1)\n citmp *= signb[maskb]\n #: t1[addra.reshape(-1,1),addrb] += citmp\n lib.takebak_2d(t1, citmp, maska, maskb)\n for i in range(norb):\n signa = aindex[:,i,1]\n signb = bindex[:,i,1]\n maska = numpy.where(signa!=0)[0]\n maskb = numpy.where(signb!=0)[0]\n addra = aindex[maska,i,0]\n addrb = bindex[maskb,i,0]\n citmp = lib.take_2d(t1, maska, maskb)\n citmp *= signa[maska].reshape(-1,1)\n citmp *= signb[maskb]\n #: ci1[maska.reshape(-1,1), maskb] += citmp\n lib.takebak_2d(ci1, citmp, addra, addrb)\n\n ci1 = numpy.zeros_like(ci_coeff)\n trans(ci1, ades, bcre) # S+*S-\n trans(ci1, acre, bdes) # S-*S+\n ci1 *= .5\n ci1 += (neleca-nelecb)**2*.25*ci_coeff\n return _as_SCIvector(ci1, 
ci_strs)\n\ndef to_fci(civec_strs, norb, nelec):\n ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)\n addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]\n addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]\n na = cistring.num_strings(norb, nelec[0])\n nb = cistring.num_strings(norb, nelec[1])\n ci0 = numpy.zeros((na,nb))\n lib.takebak_2d(ci0, ci_coeff, addrsa, addrsb)\n return ci0\n\ndef from_fci(fcivec, ci_strs, norb, nelec):\n fcivec, nelec, ci_strs = _unpack(fcivec, nelec, ci_strs)\n addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]\n addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]\n na = cistring.num_strings(norb, nelec[0])\n nb = cistring.num_strings(norb, nelec[1])\n fcivec = fcivec.reshape(na,nb)\n civec = lib.take_2d(fcivec, addrsa, addrsb)\n return _as_SCIvector(civec, ci_strs)\n\n\nclass SelectedCI(direct_spin1.FCISolver):\n\n ci_coeff_cutoff = getattr(__config__, 'fci_selected_ci_SCI_ci_coeff_cutoff', .5e-3)\n select_cutoff = getattr(__config__, 'fci_selected_ci_SCI_select_cutoff', .5e-3)\n conv_tol = getattr(__config__, 'fci_selected_ci_SCI_conv_tol', 1e-9)\n start_tol = getattr(__config__, 'fci_selected_ci_SCI_start_tol', 3e-4)\n tol_decay_rate = getattr(__config__, 'fci_selected_ci_SCI_tol_decay_rate', 0.3)\n\n def __init__(self, mol=None):\n direct_spin1.FCISolver.__init__(self, mol)\n\n##################################################\n# don't modify the following attributes, they are not input options\n #self.converged = False\n #self.ci = None\n self._strs = None\n keys = set(('ci_coeff_cutoff', 'select_cutoff', 'conv_tol',\n 'start_tol', 'tol_decay_rate'))\n self._keys = self._keys.union(keys)\n\n def dump_flags(self, verbose=None):\n direct_spin1.FCISolver.dump_flags(self, verbose)\n logger.info(self, 'ci_coeff_cutoff %g', self.ci_coeff_cutoff)\n logger.info(self, 'select_cutoff %g', self.select_cutoff)\n\n def contract_2e(self, eri, civec_strs, norb, nelec, link_index=None, **kwargs):\n# The argument civec_strs is a CI vector in function FCISolver.contract_2e.\n# Save and patch self._strs to make this contract_2e function compatible to\n# FCISolver.contract_2e.\n if getattr(civec_strs, '_strs', None) is not None:\n self._strs = civec_strs._strs\n else:\n assert(civec_strs.size == len(self._strs[0])*len(self._strs[1]))\n civec_strs = _as_SCIvector(civec_strs, self._strs)\n return contract_2e(eri, civec_strs, norb, nelec, link_index)\n\n def get_init_guess(self, ci_strs, norb, nelec, nroots, hdiag):\n '''Initial guess is the single Slater determinant\n '''\n na = len(ci_strs[0])\n nb = len(ci_strs[1])\n ci0 = direct_spin1._get_init_guess(na, nb, nroots, hdiag)\n return [_as_SCIvector(x, ci_strs) for x in ci0]\n\n def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):\n return make_hdiag(h1e, eri, ci_strs, norb, nelec)\n\n enlarge_space = enlarge_space\n kernel = kernel_float_space\n kernel_fixed_space = kernel_fixed_space\n\n# def approx_kernel(self, h1e, eri, norb, nelec, ci0=None, link_index=None,\n# tol=None, lindep=None, max_cycle=None,\n# max_memory=None, verbose=None, **kwargs):\n# ci_strs = getattr(ci0, '_strs', self._strs)\n# return self.kernel_fixed_space(h1e, eri, norb, nelec, ci_strs,\n# ci0, link_index, tol, lindep, 6,\n# max_memory, verbose, **kwargs)\n\n @lib.with_doc(spin_square.__doc__)\n def spin_square(self, civec_strs, norb, nelec):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n return spin_square(_as_SCIvector_if_not(civec_strs, self._strs), norb, nelec)\n\n def 
large_ci(self, civec_strs, norb, nelec, tol=.1, return_strs=True):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n ci, _, (strsa, strsb) = _unpack(civec_strs, nelec, self._strs)\n addra, addrb = numpy.where(abs(ci) > tol)\n if return_strs:\n strsa = [bin(x) for x in strsa[addra]]\n strsb = [bin(x) for x in strsb[addrb]]\n return list(zip(ci[addra,addrb], strsa, strsb))\n else:\n occslsta = cistring._strs2occslst(strsa[addra], norb)\n occslstb = cistring._strs2occslst(strsb[addrb], norb)\n return list(zip(ci[addra,addrb], occslsta, occslstb))\n\n def contract_ss(self, fcivec, norb, nelec):\n return contract_ss(fcivec, norb, nelec)\n\n @lib.with_doc(make_rdm1s.__doc__)\n def make_rdm1s(self, civec_strs, norb, nelec, link_index=None):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n return make_rdm1s(civec_strs, norb, nelec, link_index)\n\n @lib.with_doc(make_rdm1.__doc__)\n def make_rdm1(self, civec_strs, norb, nelec, link_index=None):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n rdm1a, rdm1b = self.make_rdm1s(civec_strs, norb, nelec, link_index)\n return rdm1a + rdm1b\n\n @lib.with_doc(make_rdm2s.__doc__)\n def make_rdm2s(self, civec_strs, norb, nelec, link_index=None, **kwargs):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n return make_rdm2s(civec_strs, norb, nelec, link_index)\n\n @lib.with_doc(make_rdm2.__doc__)\n def make_rdm2(self, civec_strs, norb, nelec, link_index=None, **kwargs):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n return make_rdm2(civec_strs, norb, nelec, link_index)\n\n def make_rdm12s(self, civec_strs, norb, nelec, link_index=None, **kwargs):\n neleca, nelecb = nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)\n if neleca > 1 and nelecb > 1:\n dm1a = numpy.einsum('iikl->kl', dm2aa) / (neleca-1)\n dm1b = numpy.einsum('iikl->kl', dm2bb) / (nelecb-1)\n else:\n dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)\n return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)\n\n def make_rdm12(self, civec_strs, norb, nelec, link_index=None, **kwargs):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n nelec_tot = sum(nelec)\n civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)\n dm2 = make_rdm2(civec_strs, norb, nelec, link_index)\n if nelec_tot > 1:\n dm1 = numpy.einsum('iikl->kl', dm2) / (nelec_tot-1)\n else:\n dm1 = make_rdm1(civec_strs, norb, nelec, link_index)\n return dm1, dm2\n\n @lib.with_doc(trans_rdm1s.__doc__)\n def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n cibra = _as_SCIvector_if_not(cibra, self._strs)\n ciket = _as_SCIvector_if_not(ciket, self._strs)\n return trans_rdm1s(cibra, ciket, norb, nelec, link_index)\n\n @lib.with_doc(trans_rdm1.__doc__)\n def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):\n nelec = direct_spin1._unpack_nelec(nelec, self.spin)\n cibra = _as_SCIvector_if_not(cibra, self._strs)\n ciket = _as_SCIvector_if_not(ciket, self._strs)\n return trans_rdm1(cibra, ciket, norb, nelec, link_index)\n\n def gen_linkstr(self, norb, nelec, tril=True, spin=None, ci_strs=None):\n if spin is None:\n spin = self.spin\n if ci_strs is None:\n ci_strs = self._strs\n neleca, nelecb = 
direct_spin1._unpack_nelec(nelec, spin)\n if tril:\n cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, neleca)\n dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, neleca)\n cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelecb)\n dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelecb)\n else:\n cd_indexa = cre_des_linkstr(ci_strs[0], norb, neleca)\n dd_indexa = des_des_linkstr(ci_strs[0], norb, neleca)\n cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelecb)\n dd_indexb = des_des_linkstr(ci_strs[1], norb, nelecb)\n return cd_indexa, dd_indexa, cd_indexb, dd_indexb\n\nSCI = SelectedCI\n\n\ndef _unpack(civec_strs, nelec, ci_strs=None, spin=None):\n neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)\n ci_strs = getattr(civec_strs, '_strs', ci_strs)\n if ci_strs is not None:\n strsa, strsb = ci_strs\n strsa = numpy.asarray(strsa)\n strsb = numpy.asarray(strsb)\n ci_strs = (strsa, strsb)\n return civec_strs, (neleca, nelecb), ci_strs\n\ndef _all_linkstr_index(ci_strs, norb, nelec):\n cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, nelec[0])\n dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, nelec[0])\n cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelec[1])\n dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelec[1])\n return cd_indexa, dd_indexa, cd_indexb, dd_indexb\n\n# numpy.ndarray does not allow to attach attribtues. Overwrite the\n# numpy.ndarray class to tag the ._strs attribute\nclass _SCIvector(numpy.ndarray):\n def __array_finalize__(self, obj):\n self._strs = getattr(obj, '_strs', None)\n\n # Whenever the contents of the array was modified (through ufunc), the tag\n # should be expired. Overwrite the output of ufunc to restore ndarray type.\n def __array_wrap__(self, out, context=None):\n return numpy.ndarray.__array_wrap__(self, out, context).view(numpy.ndarray)\n\ndef _as_SCIvector(civec, ci_strs):\n civec = civec.view(_SCIvector)\n civec._strs = ci_strs\n return civec\n\ndef _as_SCIvector_if_not(civec, ci_strs):\n if getattr(civec, '_strs', None) is None:\n civec = _as_SCIvector(civec, ci_strs)\n return civec\n\nif __name__ == '__main__':\n from functools import reduce\n from pyscf import gto\n from pyscf import scf\n from pyscf import ao2mo\n from pyscf.fci import spin_op\n from pyscf.fci import addons\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None\n mol.atom = [\n ['H', ( 1.,-1. , 0. )],\n ['H', ( 0.,-1. ,-1. )],\n ['H', ( 1.,-0.5 ,-1. )],\n ['H', ( 0.,-0. ,-1. )],\n ['H', ( 1.,-0.5 , 0. )],\n ['H', ( 0., 1. , 1. )],\n ['H', ( 1., 2. , 3. )],\n ['H', ( 1., 2. , 4. 
)],\n ]\n mol.basis = 'sto-3g'\n mol.build()\n\n m = scf.RHF(mol)\n m.kernel()\n norb = m.mo_coeff.shape[1]\n nelec = mol.nelectron\n h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))\n eri = ao2mo.kernel(m._eri, m.mo_coeff, compact=False)\n eri = eri.reshape(norb,norb,norb,norb)\n\n e1, c1 = kernel(h1e, eri, norb, nelec)\n e2, c2 = direct_spin1.kernel(h1e, eri, norb, nelec)\n print(e1, e1 - -11.894559902235565, 'diff to FCI', e1-e2)\n\n print(c1.shape, c2.shape)\n dm1_1 = make_rdm1(c1, norb, nelec)\n dm1_2 = direct_spin1.make_rdm1(c2, norb, nelec)\n print(abs(dm1_1 - dm1_2).sum())\n dm2_1 = make_rdm2(c1, norb, nelec)\n dm2_2 = direct_spin1.make_rdm12(c2, norb, nelec)[1]\n print(abs(dm2_1 - dm2_2).sum())\n\n myci = SelectedCI()\n e, c = kernel_fixed_space(myci, h1e, eri, norb, nelec, c1._strs)\n print(e - -11.894559902235565)\n\n print(myci.large_ci(c1, norb, nelec))\n print(myci.spin_square(c1, norb, nelec)[0] -\n spin_op.spin_square0(to_fci(c1, norb, nelec), norb, nelec)[0])\n\n myci = SelectedCI()\n myci = addons.fix_spin_(myci)\n e1, c1 = myci.kernel(h1e, eri, norb, nelec)\n print(e1, e1 - -11.89467612053687)\n print(myci.spin_square(c1, norb, nelec))\n", "#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Timothy Berkelbach <[email protected]>\n# Qiming Sun <[email protected]>\n#\n\nimport sys\nimport ctypes\nimport numpy\nfrom pyscf import lib\nfrom pyscf.dft import numint\nfrom pyscf.dft.numint import eval_mat, _dot_ao_ao, _dot_ao_dm\nfrom pyscf.dft.numint import _scale_ao, _contract_rho\nfrom pyscf.dft.numint import _rks_gga_wv0, _rks_gga_wv1\nfrom pyscf.dft.numint import _uks_gga_wv0, _uks_gga_wv1\nfrom pyscf.dft.numint import OCCDROP\nfrom pyscf.pbc.dft.gen_grid import libpbc, make_mask, BLKSIZE\nfrom pyscf.pbc.lib.kpts_helper import is_zero, gamma_point, member\n\n#try:\n### Moderate speedup by caching eval_ao\n# from pyscf import pbc\n# from joblib import Memory\n# memory = Memory(cachedir='./tmp/', mmap_mode='r', verbose=0)\n# def memory_cache(f):\n# g = memory.cache(f)\n# def maybe_cache(*args, **kwargs):\n# if pbc.DEBUG:\n# return g(*args, **kwargs)\n# else:\n# return f(*args, **kwargs)\n# return maybe_cache\n#except:\n# memory_cache = lambda f: f\n\ndef eval_ao(cell, coords, kpt=numpy.zeros(3), deriv=0, relativity=0, shls_slice=None,\n non0tab=None, out=None, verbose=None):\n '''Collocate AO crystal orbitals (opt. gradients) on the real-space grid.\n\n Args:\n cell : instance of :class:`Cell`\n\n coords : (nx*ny*nz, 3) ndarray\n The real-space grid point coordinates.\n\n Kwargs:\n kpt : (3,) ndarray\n The k-point corresponding to the crystal AO.\n deriv : int\n AO derivative order. 
It affects the shape of the return array.\n If deriv=0, the returned AO values are stored in a (N,nao) array.\n Otherwise the AO values are stored in an array of shape (M,N,nao).\n Here N is the number of grids, nao is the number of AO functions,\n M is the size associated to the derivative deriv.\n\n Returns:\n aoR : ([4,] nx*ny*nz, nao=cell.nao_nr()) ndarray\n The value of the AO crystal orbitals on the real-space grid by default.\n If deriv=1, also contains the value of the orbitals gradient in the\n x, y, and z directions. It can be either complex or float array,\n depending on the kpt argument. If kpt is not given (gamma point),\n aoR is a float array.\n\n See Also:\n pyscf.dft.numint.eval_ao\n\n '''\n ao_kpts = eval_ao_kpts(cell, coords, numpy.reshape(kpt, (-1,3)), deriv,\n relativity, shls_slice, non0tab, out, verbose)\n return ao_kpts[0]\n\n\n#@memory_cache\ndef eval_ao_kpts(cell, coords, kpts=None, deriv=0, relativity=0,\n shls_slice=None, non0tab=None, out=None, verbose=None, **kwargs):\n '''\n Returns:\n ao_kpts: (nkpts, [comp], ngrids, nao) ndarray\n AO values at each k-point\n '''\n if kpts is None:\n if 'kpt' in kwargs:\n sys.stderr.write('WARN: KNumInt.eval_ao function finds keyword '\n 'argument \"kpt\" and converts it to \"kpts\"\\n')\n kpts = kwargs['kpt']\n else:\n kpts = numpy.zeros((1,3))\n kpts = numpy.reshape(kpts, (-1,3))\n\n comp = (deriv+1)*(deriv+2)*(deriv+3)//6\n if cell.cart:\n feval = 'GTOval_cart_deriv%d' % deriv\n else:\n feval = 'GTOval_sph_deriv%d' % deriv\n return cell.pbc_eval_gto(feval, coords, comp, kpts,\n shls_slice=shls_slice, non0tab=non0tab, out=out)\n\n\ndef eval_rho(cell, ao, dm, non0tab=None, xctype='LDA', hermi=0, verbose=None):\n '''Collocate the *real* density (opt. gradients) on the real-space grid.\n\n Args:\n cell : instance of :class:`Mole` or :class:`Cell`\n\n ao : ([4,] nx*ny*nz, nao=cell.nao_nr()) ndarray\n The value of the AO crystal orbitals on the real-space grid by default.\n If xctype='GGA', also contains the value of the gradient in the x, y,\n and z directions.\n\n Returns:\n rho : ([4,] nx*ny*nz) ndarray\n The value of the density on the real-space grid. 
If xctype='GGA',\n also contains the value of the gradient in the x, y, and z\n directions.\n\n See Also:\n pyscf.dft.numint.eval_rho\n\n '''\n\n if xctype == 'LDA' or xctype == 'HF':\n ngrids, nao = ao.shape\n else:\n ngrids, nao = ao[0].shape\n\n if non0tab is None:\n non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE, cell.nbas),\n dtype=numpy.uint8)\n non0tab[:] = 0xff\n\n # complex orbitals or density matrix\n if numpy.iscomplexobj(ao) or numpy.iscomplexobj(dm):\n shls_slice = (0, cell.nbas)\n ao_loc = cell.ao_loc_nr()\n dm = dm.astype(numpy.complex128)\n# For GGA, function eval_rho returns real(|\\nabla i> D_ij <j| + |i> D_ij <\\nabla j|)\n# = real(|\\nabla i> D_ij <j| + |i> D_ij <\\nabla j|)\n# = real(|\\nabla i> D_ij <j| + conj(|\\nabla j> conj(D_ij) < i|))\n# = real(|\\nabla i> D_ij <j|) + real(|\\nabla j> conj(D_ij) < i|)\n# = real(|\\nabla i> [D_ij + (D^\\dagger)_ij] <j|)\n# symmetrization dm (D + D.conj().T) then /2 because the code below computes\n# 2*real(|\\nabla i> D_ij <j|)\n if not hermi:\n dm = (dm + dm.conj().T) * .5\n\n def dot_bra(bra, aodm):\n #:rho = numpy.einsum('pi,pi->p', bra.real, aodm.real)\n #:rho += numpy.einsum('pi,pi->p', bra.imag, aodm.imag)\n #:return rho\n return _contract_rho(bra, aodm)\n\n if xctype == 'LDA' or xctype == 'HF':\n c0 = _dot_ao_dm(cell, ao, dm, non0tab, shls_slice, ao_loc)\n rho = dot_bra(ao, c0)\n\n elif xctype == 'GGA':\n rho = numpy.empty((4,ngrids))\n c0 = _dot_ao_dm(cell, ao[0], dm, non0tab, shls_slice, ao_loc)\n rho[0] = dot_bra(ao[0], c0)\n for i in range(1, 4):\n rho[i] = dot_bra(ao[i], c0) * 2\n\n else:\n # rho[4] = \\nabla^2 rho, rho[5] = 1/2 |nabla f|^2\n rho = numpy.empty((6,ngrids))\n c0 = _dot_ao_dm(cell, ao[0], dm, non0tab, shls_slice, ao_loc)\n rho[0] = dot_bra(ao[0], c0)\n rho[5] = 0\n for i in range(1, 4):\n rho[i] = dot_bra(ao[i], c0) * 2 # *2 for +c.c.\n c1 = _dot_ao_dm(cell, ao[i], dm, non0tab, shls_slice, ao_loc)\n rho[5] += dot_bra(ao[i], c1)\n XX, YY, ZZ = 4, 7, 9\n ao2 = ao[XX] + ao[YY] + ao[ZZ]\n rho[4] = dot_bra(ao2, c0)\n rho[4] += rho[5]\n rho[4] *= 2 # *2 for +c.c.\n rho[5] *= .5\n else:\n # real orbitals and real DM\n rho = numint.eval_rho(cell, ao, dm, non0tab, xctype, hermi, verbose)\n return rho\n\ndef eval_rho2(cell, ao, mo_coeff, mo_occ, non0tab=None, xctype='LDA',\n verbose=None):\n '''Refer to `pyscf.dft.numint.eval_rho2` for full documentation.\n '''\n xctype = xctype.upper()\n if xctype == 'LDA' or xctype == 'HF':\n ngrids, nao = ao.shape\n else:\n ngrids, nao = ao[0].shape\n\n if non0tab is None:\n non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE,cell.nbas),\n dtype=numpy.uint8)\n non0tab[:] = 0xff\n\n # complex orbitals or density matrix\n if numpy.iscomplexobj(ao) or numpy.iscomplexobj(mo_coeff):\n def dot(bra, ket):\n #:rho = numpy.einsum('pi,pi->p', bra.real, ket.real)\n #:rho += numpy.einsum('pi,pi->p', bra.imag, ket.imag)\n #:return rho\n return _contract_rho(bra, ket)\n\n shls_slice = (0, cell.nbas)\n ao_loc = cell.ao_loc_nr()\n pos = mo_occ > OCCDROP\n cpos = numpy.einsum('ij,j->ij', mo_coeff[:,pos], numpy.sqrt(mo_occ[pos]))\n\n if pos.sum() > 0:\n if xctype == 'LDA' or xctype == 'HF':\n c0 = _dot_ao_dm(cell, ao, cpos, non0tab, shls_slice, ao_loc)\n rho = dot(c0, c0)\n elif xctype == 'GGA':\n rho = numpy.empty((4,ngrids))\n c0 = _dot_ao_dm(cell, ao[0], cpos, non0tab, shls_slice, ao_loc)\n rho[0] = dot(c0, c0)\n for i in range(1, 4):\n c1 = _dot_ao_dm(cell, ao[i], cpos, non0tab, shls_slice, ao_loc)\n rho[i] = dot(c0, c1) * 2 # *2 for +c.c.\n else: # meta-GGA\n # rho[4] = \\nabla^2 rho, 
rho[5] = 1/2 |nabla f|^2\n rho = numpy.empty((6,ngrids))\n c0 = _dot_ao_dm(cell, ao[0], cpos, non0tab, shls_slice, ao_loc)\n rho[0] = dot(c0, c0)\n rho[5] = 0\n for i in range(1, 4):\n c1 = _dot_ao_dm(cell, ao[i], cpos, non0tab, shls_slice, ao_loc)\n rho[i] = dot(c0, c1) * 2 # *2 for +c.c.\n rho[5]+= dot(c1, c1)\n XX, YY, ZZ = 4, 7, 9\n ao2 = ao[XX] + ao[YY] + ao[ZZ]\n c1 = _dot_ao_dm(cell, ao2, cpos, non0tab, shls_slice, ao_loc)\n rho[4] = dot(c0, c1)\n rho[4]+= rho[5]\n rho[4]*= 2\n rho[5]*= .5\n else:\n if xctype == 'LDA' or xctype == 'HF':\n rho = numpy.zeros(ngrids)\n elif xctype == 'GGA':\n rho = numpy.zeros((4,ngrids))\n else:\n rho = numpy.zeros((6,ngrids))\n\n neg = mo_occ < -OCCDROP\n if neg.sum() > 0:\n cneg = numpy.einsum('ij,j->ij', mo_coeff[:,neg], numpy.sqrt(-mo_occ[neg]))\n if xctype == 'LDA' or xctype == 'HF':\n c0 = _dot_ao_dm(cell, ao, cneg, non0tab, shls_slice, ao_loc)\n rho -= dot(c0, c0)\n elif xctype == 'GGA':\n c0 = _dot_ao_dm(cell, ao[0], cneg, non0tab, shls_slice, ao_loc)\n rho[0] -= dot(c0, c0)\n for i in range(1, 4):\n c1 = _dot_ao_dm(cell, ao[i], cneg, non0tab, shls_slice, ao_loc)\n rho[i] -= dot(c0, c1) * 2 # *2 for +c.c.\n else:\n c0 = _dot_ao_dm(cell, ao[0], cneg, non0tab, shls_slice, ao_loc)\n rho[0] -= dot(c0, c0)\n rho5 = 0\n for i in range(1, 4):\n c1 = _dot_ao_dm(cell, ao[i], cneg, non0tab, shls_slice, ao_loc)\n rho[i] -= dot(c0, c1) * 2 # *2 for +c.c.\n rho5 -= dot(c1, c1)\n XX, YY, ZZ = 4, 7, 9\n ao2 = ao[XX] + ao[YY] + ao[ZZ]\n c1 = _dot_ao_dm(cell, ao2, cneg, non0tab, shls_slice, ao_loc)\n rho[4] -= dot(c0, c1) * 2\n rho[4] -= rho5 * 2\n rho[5] -= rho5 * .5\n else:\n rho = numint.eval_rho2(cell, ao, mo_coeff, mo_occ, non0tab, xctype, verbose)\n return rho\n\n\ndef nr_rks(ni, cell, grids, xc_code, dms, spin=0, relativity=0, hermi=0,\n kpts=None, kpts_band=None, max_memory=2000, verbose=None):\n '''Calculate RKS XC functional and potential matrix for given meshgrids and density matrix\n\n Note: This is a replica of pyscf.dft.numint.nr_rks_vxc with kpts added.\n This implemented uses slow function in numint, which only calls eval_rho, eval_mat.\n Faster function uses eval_rho2 which is not yet implemented.\n\n Args:\n ni : an instance of :class:`NumInt` or :class:`KNumInt`\n\n cell : instance of :class:`Mole` or :class:`Cell`\n\n grids : an instance of :class:`Grids`\n grids.coords and grids.weights are needed for coordinates and weights of meshgrids.\n xc_code : str\n XC functional description.\n See :func:`parse_xc` of pyscf/dft/libxc.py for more details.\n dms : 2D/3D array or a list of 2D/3D arrays\n Density matrices (2D) / density matrices for k-points (3D)\n\n Kwargs:\n spin : int\n spin polarized if spin = 1\n relativity : int\n No effects.\n hermi : int\n No effects\n max_memory : int or float\n The maximum size of cache to use (in MB).\n verbose : int or object of :class:`Logger`\n No effects.\n kpts : (3,) ndarray or (nkpts,3) ndarray\n Single or multiple k-points sampled for the DM. Default is gamma point.\n kpts_band : (3,) ndarray or (*,3) ndarray\n A list of arbitrary \"band\" k-points at which to evaluate the XC matrix.\n\n Returns:\n nelec, excsum, vmat.\n nelec is the number of electrons generated by numerical integration.\n excsum is the XC functional value. 
vmat is the XC potential matrix in\n 2D array of shape (nao,nao) where nao is the number of AO functions.\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n\n xctype = ni._xc_type(xc_code)\n make_rho, nset, nao = ni._gen_rho_evaluator(cell, dms, hermi)\n\n nelec = numpy.zeros(nset)\n excsum = numpy.zeros(nset)\n vmat = [0]*nset\n if xctype == 'LDA':\n ao_deriv = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho = make_rho(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1)[:2]\n vrho = vxc[0]\n den = rho*weight\n nelec[i] += den.sum()\n excsum[i] += (den*exc).sum()\n vmat[i] += ni.eval_mat(cell, ao_k1, weight, rho, vxc,\n mask, xctype, 0, verbose)\n elif xctype == 'GGA':\n ao_deriv = 1\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho = make_rho(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1)[:2]\n den = rho[0]*weight\n nelec[i] += den.sum()\n excsum[i] += (den*exc).sum()\n vmat[i] += ni.eval_mat(cell, ao_k1, weight, rho, vxc,\n mask, xctype, 0, verbose)\n elif xctype == 'MGGA':\n if (any(x in xc_code.upper() for x in ('CC06', 'CS', 'BR89', 'MK00'))):\n raise NotImplementedError('laplacian in meta-GGA method')\n ao_deriv = 2\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho = make_rho(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1)[:2]\n den = rho[0]*weight\n nelec[i] += den.sum()\n excsum[i] += (den*exc).sum()\n vmat[i] += ni.eval_mat(cell, ao_k1, weight, rho, vxc,\n mask, xctype, 0, verbose)\n if nset == 1:\n nelec = nelec[0]\n excsum = excsum[0]\n vmat = vmat[0]\n return nelec, excsum, numpy.asarray(vmat)\n\ndef nr_uks(ni, cell, grids, xc_code, dms, spin=1, relativity=0, hermi=0,\n kpts=None, kpts_band=None, max_memory=2000, verbose=None):\n '''Calculate UKS XC functional and potential matrix for given meshgrids and density matrix\n\n Note: This is a replica of pyscf.dft.numint.nr_rks_vxc with kpts added.\n This implemented uses slow function in numint, which only calls eval_rho, eval_mat.\n Faster function uses eval_rho2 which is not yet implemented.\n\n Args:\n ni : an instance of :class:`NumInt` or :class:`KNumInt`\n\n cell : instance of :class:`Mole` or :class:`Cell`\n\n grids : an instance of :class:`Grids`\n grids.coords and grids.weights are needed for coordinates and weights of meshgrids.\n xc_code : str\n XC functional description.\n See :func:`parse_xc` of pyscf/dft/libxc.py for more details.\n dms :\n Density matrices\n\n Kwargs:\n spin : int\n spin polarized if spin = 1\n relativity : int\n No effects.\n hermi : int\n Input density matrices symmetric or not\n max_memory : int or float\n The maximum size of cache to use (in MB).\n verbose : int or object of :class:`Logger`\n No effects.\n kpts : (3,) ndarray or (nkpts,3) ndarray\n Single or multiple k-points sampled for the DM. Default is gamma point.\n kpts_band : (3,) ndarray or (*,3) ndarray\n A list of arbitrary \"band\" k-points at which to evaluate the XC matrix.\n\n Returns:\n nelec, excsum, vmat.\n nelec is the number of electrons generated by numerical integration.\n excsum is the XC functional value. 
vmat is the XC potential matrix in\n 2D array of shape (nao,nao) where nao is the number of AO functions.\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n\n xctype = ni._xc_type(xc_code)\n dma, dmb = _format_uks_dm(dms)\n nao = dma.shape[-1]\n make_rhoa, nset = ni._gen_rho_evaluator(cell, dma, hermi)[:2]\n make_rhob = ni._gen_rho_evaluator(cell, dmb, hermi)[0]\n\n nelec = numpy.zeros((2,nset))\n excsum = numpy.zeros(nset)\n vmata = [0]*nset\n vmatb = [0]*nset\n if xctype == 'LDA':\n ao_deriv = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho_a = make_rhoa(i, ao_k2, mask, xctype)\n rho_b = make_rhob(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, (rho_a, rho_b),\n 1, relativity, 1, verbose)[:2]\n vrho = vxc[0]\n den = rho_a * weight\n nelec[0,i] += den.sum()\n excsum[i] += (den*exc).sum()\n den = rho_b * weight\n nelec[1,i] += den.sum()\n excsum[i] += (den*exc).sum()\n\n vmata[i] += ni.eval_mat(cell, ao_k1, weight, rho_a, vrho[:,0],\n mask, xctype, 1, verbose)\n vmatb[i] += ni.eval_mat(cell, ao_k1, weight, rho_b, vrho[:,1],\n mask, xctype, 1, verbose)\n elif xctype == 'GGA':\n ao_deriv = 1\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts,\n kpts_band, max_memory):\n for i in range(nset):\n rho_a = make_rhoa(i, ao_k2, mask, xctype)\n rho_b = make_rhob(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, (rho_a, rho_b),\n 1, relativity, 1, verbose)[:2]\n vrho, vsigma = vxc[:2]\n den = rho_a[0]*weight\n nelec[0,i] += den.sum()\n excsum[i] += (den*exc).sum()\n den = rho_b[0]*weight\n nelec[1,i] += den.sum()\n excsum[i] += (den*exc).sum()\n\n vmata[i] += ni.eval_mat(cell, ao_k1, weight, (rho_a,rho_b),\n (vrho[:,0], (vsigma[:,0],vsigma[:,1])),\n mask, xctype, 1, verbose)\n vmatb[i] += ni.eval_mat(cell, ao_k1, weight, (rho_b,rho_a),\n (vrho[:,1], (vsigma[:,2],vsigma[:,1])),\n mask, xctype, 1, verbose)\n elif xctype == 'MGGA':\n assert(all(x not in xc_code.upper() for x in ('CC06', 'CS', 'BR89', 'MK00')))\n ao_deriv = 2\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, kpts_band,\n max_memory):\n for i in range(nset):\n rho_a = make_rhoa(i, ao_k2, mask, xctype)\n rho_b = make_rhob(i, ao_k2, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, (rho_a, rho_b),\n 1, relativity, 1, verbose)[:2]\n vrho, vsigma, vlapl, vtau = vxc\n den = rho_a[0]*weight\n nelec[0,i] += den.sum()\n excsum[i] += (den*exc).sum()\n den = rho_b[0]*weight\n nelec[1,i] += den.sum()\n excsum[i] += (den*exc).sum()\n\n v = (vrho[:,0], (vsigma[:,0],vsigma[:,1]), None, vtau[:,0])\n vmata[i] += ni.eval_mat(cell, ao_k1, weight, (rho_a,rho_b), v,\n mask, xctype, 1, verbose)\n v = (vrho[:,1], (vsigma[:,2],vsigma[:,1]), None, vtau[:,1])\n vmatb[i] += ni.eval_mat(cell, ao_k1, weight, (rho_b,rho_a), v,\n mask, xctype, 1, verbose)\n v = None\n\n if dma.ndim == vmata[0].ndim: # One set of DMs in the input\n nelec = nelec[:,0]\n excsum = excsum[0]\n vmata = vmata[0]\n vmatb = vmatb[0]\n return nelec, excsum, numpy.asarray((vmata,vmatb))\n\ndef _format_uks_dm(dms):\n dma, dmb = dms\n if getattr(dms, 'mo_coeff', None) is not None:\n#TODO: test whether dm.mo_coeff matching dm\n mo_coeff = dms.mo_coeff\n mo_occ = dms.mo_occ\n if (isinstance(mo_coeff[0], numpy.ndarray) and\n mo_coeff[0].ndim < dma.ndim): # handle ROKS\n mo_occa = [numpy.array(occ> 0, dtype=numpy.double) for occ in mo_occ]\n mo_occb = [numpy.array(occ==2, 
dtype=numpy.double) for occ in mo_occ]\n dma = lib.tag_array(dma, mo_coeff=mo_coeff, mo_occ=mo_occa)\n dmb = lib.tag_array(dmb, mo_coeff=mo_coeff, mo_occ=mo_occb)\n else:\n dma = lib.tag_array(dma, mo_coeff=mo_coeff[0], mo_occ=mo_occ[0])\n dmb = lib.tag_array(dmb, mo_coeff=mo_coeff[1], mo_occ=mo_occ[1])\n return dma, dmb\n\nnr_rks_vxc = nr_rks\nnr_uks_vxc = nr_uks\n\ndef nr_rks_fxc(ni, cell, grids, xc_code, dm0, dms, relativity=0, hermi=0,\n rho0=None, vxc=None, fxc=None, kpts=None, max_memory=2000,\n verbose=None):\n '''Contract RKS XC kernel matrix with given density matrices\n\n Args:\n ni : an instance of :class:`NumInt` or :class:`KNumInt`\n\n cell : instance of :class:`Mole` or :class:`Cell`\n\n grids : an instance of :class:`Grids`\n grids.coords and grids.weights are needed for coordinates and weights of meshgrids.\n xc_code : str\n XC functional description.\n See :func:`parse_xc` of pyscf/dft/libxc.py for more details.\n dms : 2D/3D array or a list of 2D/3D arrays\n Density matrices (2D) / density matrices for k-points (3D)\n\n Kwargs:\n hermi : int\n Input density matrices symmetric or not\n max_memory : int or float\n The maximum size of cache to use (in MB).\n rho0 : float array\n Zero-order density (and density derivative for GGA). Giving kwargs rho0,\n vxc and fxc to improve better performance.\n vxc : float array\n First order XC derivatives\n fxc : float array\n Second order XC derivatives\n\n Examples:\n\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n xctype = ni._xc_type(xc_code)\n\n make_rho, nset, nao = ni._gen_rho_evaluator(cell, dms, hermi)\n if ((xctype == 'LDA' and fxc is None) or\n (xctype == 'GGA' and rho0 is None)):\n make_rho0 = ni._gen_rho_evaluator(cell, dm0, 1)[0]\n\n ao_loc = cell.ao_loc_nr()\n vmat = [0] * nset\n if xctype == 'LDA':\n ao_deriv = 0\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if fxc is None:\n rho = make_rho0(0, ao_k1, mask, xctype)\n fxc0 = ni.eval_xc(xc_code, rho, 0, relativity, 2, verbose)[2]\n frr = fxc0[0]\n else:\n frr = fxc[0][ip:ip+ngrid]\n ip += ngrid\n\n for i in range(nset):\n rho1 = make_rho(i, ao_k1, mask, xctype)\n wv = weight * frr * rho1\n vmat[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n elif xctype == 'GGA':\n ao_deriv = 1\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if rho0 is None:\n rho = make_rho0(0, ao_k1, mask, xctype)\n else:\n rho = numpy.asarray(rho0[:,ip:ip+ngrid], order='C')\n\n if vxc is None or fxc is None:\n vxc0, fxc0 = ni.eval_xc(xc_code, rho, 0, relativity, 2, verbose)[1:3]\n else:\n vxc0 = (None, vxc[1][ip:ip+ngrid])\n fxc0 = (fxc[0][ip:ip+ngrid], fxc[1][ip:ip+ngrid], fxc[2][ip:ip+ngrid])\n ip += ngrid\n\n for i in range(nset):\n rho1 = make_rho(i, ao_k1, mask, xctype)\n wv = _rks_gga_wv1(rho, rho1, vxc0, fxc0, weight)\n vmat[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n # call swapaxes method to swap last two indices because vmat may be a 3D\n # array (nset,nao,nao) in single k-point mode or a 4D array\n # (nset,nkpts,nao,nao) in k-points mode\n for i in range(nset): # for (\\nabla\\mu) \\nu + \\mu (\\nabla\\nu)\n vmat[i] = vmat[i] + vmat[i].swapaxes(-2,-1).conj()\n\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == vmat[0].ndim:\n # One set of DMs in the input\n vmat = vmat[0]\n return 
numpy.asarray(vmat)\n\ndef nr_rks_fxc_st(ni, cell, grids, xc_code, dm0, dms_alpha, relativity=0, singlet=True,\n rho0=None, vxc=None, fxc=None, kpts=None, max_memory=2000,\n verbose=None):\n '''Associated to singlet or triplet Hessian\n Note the difference to nr_rks_fxc, dms_alpha is the response density\n matrices of alpha spin, alpha+/-beta DM is applied due to singlet/triplet\n coupling\n\n Ref. CPL, 256, 454\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n xctype = ni._xc_type(xc_code)\n\n make_rho, nset, nao = ni._gen_rho_evaluator(cell, dms_alpha)\n if ((xctype == 'LDA' and fxc is None) or\n (xctype == 'GGA' and rho0 is None)):\n make_rho0 = ni._gen_rho_evaluator(cell, dm0, 1)[0]\n\n ao_loc = cell.ao_loc_nr()\n vmat = [0] * nset\n if xctype == 'LDA':\n ao_deriv = 0\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if fxc is None:\n rho = make_rho0(0, ao_k1, mask, xctype)\n rho *= .5 # alpha density\n fxc0 = ni.eval_xc(xc_code, (rho,rho), 1, deriv=2)[2]\n u_u, u_d, d_d = fxc0[0].T\n else:\n u_u, u_d, d_d = fxc[0][ip:ip+ngrid].T\n ip += ngrid\n if singlet:\n frho = u_u + u_d\n else:\n frho = u_u - u_d\n\n for i in range(nset):\n rho1 = make_rho(i, ao_k1, mask, xctype)\n wv = weight * frho * rho1\n vmat[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n elif xctype == 'GGA':\n ao_deriv = 1\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if vxc is None or fxc is None:\n rho = make_rho0(0, ao_k1, mask, xctype)\n rho *= .5 # alpha density\n vxc0, fxc0 = ni.eval_xc(xc_code, (rho,rho), 1, deriv=2)[1:3]\n\n vsigma = vxc0[1].T\n u_u, u_d, d_d = fxc0[0].T # v2rho2\n u_uu, u_ud, u_dd, d_uu, d_ud, d_dd = fxc0[1].T # v2rhosigma\n uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd = fxc0[2].T # v2sigma2\n else:\n rho = rho0[0][:,ip:ip+ngrid]\n vsigma = vxc[1][ip:ip+ngrid].T\n u_u, u_d, d_d = fxc[0][ip:ip+ngrid].T # v2rho2\n u_uu, u_ud, u_dd, d_uu, d_ud, d_dd = fxc[1][ip:ip+ngrid].T # v2rhosigma\n uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd = fxc[2][ip:ip+ngrid].T # v2sigma2\n\n if singlet:\n fgamma = vsigma[0] + vsigma[1] * .5\n frho = u_u + u_d\n fgg = uu_uu + .5*ud_ud + 2*uu_ud + uu_dd\n frhogamma = u_uu + u_dd + u_ud\n else:\n fgamma = vsigma[0] - vsigma[1] * .5\n frho = u_u - u_d\n fgg = uu_uu - uu_dd\n frhogamma = u_uu - u_dd\n\n for i in range(nset):\n # rho1[0 ] = |b><j| z_{bj}\n # rho1[1:] = \\nabla(|b><j|) z_{bj}\n rho1 = make_rho(i, ao_k1, mask, xctype)\n wv = _rks_gga_wv1(rho, rho1, (None,fgamma),\n (frho,frhogamma,fgg), weight)\n vmat[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n for i in range(nset): # for (\\nabla\\mu) \\nu + \\mu (\\nabla\\nu)\n vmat[i] = vmat[i] + vmat[i].swapaxes(-2,-1).conj()\n\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n if isinstance(dms_alpha, numpy.ndarray) and dms_alpha.ndim == vmat[0].ndim:\n vmat = vmat[0]\n return numpy.asarray(vmat)\n\n\ndef nr_uks_fxc(ni, cell, grids, xc_code, dm0, dms, relativity=0, hermi=0,\n rho0=None, vxc=None, fxc=None, kpts=None, max_memory=2000,\n verbose=None):\n '''Contract UKS XC kernel matrix with given density matrices\n\n Args:\n ni : an instance of :class:`NumInt` or :class:`KNumInt`\n\n cell : instance of :class:`Mole` or :class:`Cell`\n\n grids : an instance of :class:`Grids`\n grids.coords and grids.weights are needed for coordinates and weights of meshgrids.\n xc_code : str\n XC 
functional description.\n See :func:`parse_xc` of pyscf/dft/libxc.py for more details.\n dms : 2D array a list of 2D arrays\n Density matrix or multiple density matrices\n\n Kwargs:\n hermi : int\n Input density matrices symmetric or not\n max_memory : int or float\n The maximum size of cache to use (in MB).\n rho0 : float array\n Zero-order density (and density derivative for GGA). Giving kwargs rho0,\n vxc and fxc to improve better performance.\n vxc : float array\n First order XC derivatives\n fxc : float array\n Second order XC derivatives\n\n Returns:\n nelec, excsum, vmat.\n nelec is the number of electrons generated by numerical integration.\n excsum is the XC functional value. vmat is the XC potential matrix in\n 2D array of shape (nao,nao) where nao is the number of AO functions.\n\n Examples:\n\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n xctype = ni._xc_type(xc_code)\n\n dma, dmb = _format_uks_dm(dms)\n nao = dma.shape[-1]\n make_rhoa, nset = ni._gen_rho_evaluator(cell, dma, hermi)[:2]\n make_rhob = ni._gen_rho_evaluator(cell, dmb, hermi)[0]\n\n if ((xctype == 'LDA' and fxc is None) or\n (xctype == 'GGA' and rho0 is None)):\n dm0a, dm0b = _format_uks_dm(dm0)\n make_rho0a = ni._gen_rho_evaluator(cell, dm0a, 1)[0]\n make_rho0b = ni._gen_rho_evaluator(cell, dm0b, 1)[0]\n\n shls_slice = (0, cell.nbas)\n ao_loc = cell.ao_loc_nr()\n\n vmata = [0] * nset\n vmatb = [0] * nset\n if xctype == 'LDA':\n ao_deriv = 0\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if fxc is None:\n rho0a = make_rho0a(0, ao_k1, mask, xctype)\n rho0b = make_rho0b(0, ao_k1, mask, xctype)\n fxc0 = ni.eval_xc(xc_code, (rho0a,rho0b), 1, relativity, 2, verbose)[2]\n u_u, u_d, d_d = fxc0[0].T\n else:\n u_u, u_d, d_d = fxc[0][ip:ip+ngrid].T\n ip += ngrid\n\n for i in range(nset):\n rho1a = make_rhoa(i, ao_k1, mask, xctype)\n rho1b = make_rhob(i, ao_k1, mask, xctype)\n wv = u_u * rho1a + u_d * rho1b\n wv *= weight\n vmata[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n wv = u_d * rho1a + d_d * rho1b\n wv *= weight\n vmatb[i] += ni._fxc_mat(cell, ao_k1, wv, mask, xctype, ao_loc)\n\n elif xctype == 'GGA':\n ao_deriv = 1\n ip = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n ngrid = weight.size\n if rho0 is None:\n rho0a = make_rho0a(0, ao_k1, mask, xctype)\n rho0b = make_rho0b(0, ao_k1, mask, xctype)\n else:\n rho0a = rho0[0][:,ip:ip+ngrid]\n rho0b = rho0[1][:,ip:ip+ngrid]\n if vxc is None or fxc is None:\n vxc0, fxc0 = ni.eval_xc(xc_code, (rho0a,rho0b), 1, relativity, 2, verbose)[1:3]\n else:\n vxc0 = (None, vxc[1][ip:ip+ngrid])\n fxc0 = (fxc[0][ip:ip+ngrid], fxc[1][ip:ip+ngrid], fxc[2][ip:ip+ngrid])\n ip += ngrid\n\n for i in range(nset):\n rho1a = make_rhoa(i, ao_k1, mask, xctype)\n rho1b = make_rhob(i, ao_k1, mask, xctype)\n wva, wvb = _uks_gga_wv1((rho0a,rho0b), (rho1a,rho1b),\n vxc0, fxc0, weight)\n vmata[i] += ni._fxc_mat(cell, ao_k1, wva, mask, xctype, ao_loc)\n vmatb[i] += ni._fxc_mat(cell, ao_k1, wvb, mask, xctype, ao_loc)\n\n for i in range(nset): # for (\\nabla\\mu) \\nu + \\mu (\\nabla\\nu)\n vmata[i] = vmata[i] + vmata[i].swapaxes(-1,-2).conj()\n vmatb[i] = vmatb[i] + vmatb[i].swapaxes(-1,-2).conj()\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n if dma.ndim == vmata[0].ndim: # One set of DMs in the input\n vmata = vmata[0]\n vmatb = vmatb[0]\n return numpy.asarray((vmata,vmatb))\n\ndef 
_fxc_mat(cell, ao, wv, non0tab, xctype, ao_loc):\n shls_slice = (0, cell.nbas)\n\n if xctype == 'LDA' or xctype == 'HF':\n #:aow = numpy.einsum('pi,p->pi', ao, wv)\n aow = _scale_ao(ao, wv)\n mat = _dot_ao_ao(cell, ao, aow, non0tab, shls_slice, ao_loc)\n else:\n #:aow = numpy.einsum('npi,np->pi', ao, wv)\n aow = _scale_ao(ao, wv)\n mat = _dot_ao_ao(cell, ao[0], aow, non0tab, shls_slice, ao_loc)\n return mat\n\ndef cache_xc_kernel(ni, cell, grids, xc_code, mo_coeff, mo_occ, spin=0,\n kpts=None, max_memory=2000):\n '''Compute the 0th order density, Vxc and fxc. They can be used in TDDFT,\n DFT hessian module etc.\n '''\n if kpts is None:\n kpts = numpy.zeros((1,3))\n xctype = ni._xc_type(xc_code)\n ao_deriv = 0\n if xctype == 'GGA':\n ao_deriv = 1\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n nao = cell.nao_nr()\n if spin == 0:\n rho = []\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n rho.append(ni.eval_rho2(cell, ao_k1, mo_coeff, mo_occ, mask, xctype))\n rho = numpy.hstack(rho)\n else:\n rhoa = []\n rhob = []\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, ao_deriv, kpts, None, max_memory):\n rhoa.append(ni.eval_rho2(cell, ao_k1, mo_coeff[0], mo_occ[0], mask, xctype))\n rhob.append(ni.eval_rho2(cell, ao_k1, mo_coeff[1], mo_occ[1], mask, xctype))\n rho = (numpy.hstack(rhoa), numpy.hstack(rhob))\n vxc, fxc = ni.eval_xc(xc_code, rho, spin, 0, 2, 0)[1:3]\n return rho, vxc, fxc\n\n\ndef get_rho(ni, cell, dm, grids, kpts=numpy.zeros((1,3)), max_memory=2000):\n '''Density in real space\n '''\n make_rho, nset, nao = ni._gen_rho_evaluator(cell, dm)\n assert(nset == 1)\n rho = numpy.empty(grids.weights.size)\n p1 = 0\n for ao_k1, ao_k2, mask, weight, coords \\\n in ni.block_loop(cell, grids, nao, 0, kpts, None, max_memory):\n p0, p1 = p1, p1 + weight.size\n rho[p0:p1] = make_rho(0, ao_k1, mask, 'LDA')\n return rho\n\n\nclass NumInt(numint.NumInt):\n '''Generalization of pyscf's NumInt class for a single k-point shift and\n periodic images.\n '''\n def eval_ao(self, cell, coords, kpt=numpy.zeros(3), deriv=0, relativity=0,\n shls_slice=None, non0tab=None, out=None, verbose=None):\n return eval_ao(cell, coords, kpt, deriv, relativity, shls_slice,\n non0tab, out, verbose)\n\n @lib.with_doc(make_mask.__doc__)\n def make_mask(self, cell, coords, relativity=0, shls_slice=None,\n verbose=None):\n return make_mask(cell, coords, relativity, shls_slice, verbose)\n\n @lib.with_doc(eval_rho.__doc__)\n def eval_rho(self, cell, ao, dm, non0tab=None, xctype='LDA', hermi=0, verbose=None):\n return eval_rho(cell, ao, dm, non0tab, xctype, hermi, verbose)\n\n def eval_rho2(self, cell, ao, mo_coeff, mo_occ, non0tab=None, xctype='LDA',\n verbose=None):\n return eval_rho2(cell, ao, mo_coeff, mo_occ, non0tab, xctype, verbose)\n\n def nr_vxc(self, cell, grids, xc_code, dms, spin=0, relativity=0, hermi=0,\n kpt=None, kpts_band=None, max_memory=2000, verbose=None):\n '''Evaluate RKS/UKS XC functional and potential matrix.\n See :func:`nr_rks` and :func:`nr_uks` for more details.\n '''\n if spin == 0:\n return self.nr_rks(cell, grids, xc_code, dms, hermi,\n kpt, kpts_band, max_memory, verbose)\n else:\n return self.nr_uks(cell, grids, xc_code, dms, hermi,\n kpt, kpts_band, max_memory, verbose)\n\n @lib.with_doc(nr_rks.__doc__)\n def nr_rks(self, cell, grids, xc_code, dms, hermi=0,\n kpt=numpy.zeros(3), kpts_band=None, max_memory=2000, verbose=None):\n if kpts_band is not None:\n# To compute Vxc on 
kpts_band, convert the NumInt object to KNumInt object.\n ni = KNumInt()\n ni.__dict__.update(self.__dict__)\n nao = dms.shape[-1]\n return ni.nr_rks(cell, grids, xc_code, dms.reshape(-1,1,nao,nao),\n hermi, kpt.reshape(1,3), kpts_band, max_memory,\n verbose)\n return nr_rks(self, cell, grids, xc_code, dms,\n 0, 0, hermi, kpt, kpts_band, max_memory, verbose)\n\n @lib.with_doc(nr_uks.__doc__)\n def nr_uks(self, cell, grids, xc_code, dms, hermi=0,\n kpt=numpy.zeros(3), kpts_band=None, max_memory=2000, verbose=None):\n if kpts_band is not None:\n# To compute Vxc on kpts_band, convert the NumInt object to KNumInt object.\n ni = KNumInt()\n ni.__dict__.update(self.__dict__)\n nao = dms[0].shape[-1]\n return ni.nr_uks(cell, grids, xc_code, dms.reshape(-1,1,nao,nao),\n hermi, kpt.reshape(1,3), kpts_band, max_memory,\n verbose)\n return nr_uks(self, cell, grids, xc_code, dms,\n 1, 0, hermi, kpt, kpts_band, max_memory, verbose)\n\n def eval_mat(self, cell, ao, weight, rho, vxc,\n non0tab=None, xctype='LDA', spin=0, verbose=None):\n# Guess whether ao is evaluated for kpts_band. When xctype is LDA, ao on grids\n# should be a 2D array. For other xc functional, ao should be a 3D array.\n if ao.ndim == 2 or (xctype != 'LDA' and ao.ndim == 3):\n mat = eval_mat(cell, ao, weight, rho, vxc, non0tab, xctype, spin, verbose)\n else:\n nkpts = len(ao)\n nao = ao[0].shape[-1]\n mat = numpy.empty((nkpts,nao,nao), dtype=numpy.complex128)\n for k in range(nkpts):\n mat[k] = eval_mat(cell, ao[k], weight, rho, vxc,\n non0tab, xctype, spin, verbose)\n return mat\n\n def _fxc_mat(self, cell, ao, wv, non0tab, xctype, ao_loc):\n return _fxc_mat(cell, ao, wv, non0tab, xctype, ao_loc)\n\n def block_loop(self, cell, grids, nao, deriv=0, kpt=numpy.zeros(3),\n kpts_band=None, max_memory=2000, non0tab=None, blksize=None):\n '''Define this macro to loop over grids by blocks.\n '''\n# For UniformGrids, grids.coords does not indicate whehter grids are initialized\n if grids.non0tab is None:\n grids.build(with_non0tab=True)\n grids_coords = grids.coords\n grids_weights = grids.weights\n ngrids = grids_coords.shape[0]\n comp = (deriv+1)*(deriv+2)*(deriv+3)//6\n# NOTE to index grids.non0tab, the blksize needs to be the integer multiplier of BLKSIZE\n if blksize is None:\n blksize = int(max_memory*1e6/(comp*2*nao*16*BLKSIZE))*BLKSIZE\n blksize = max(BLKSIZE, min(blksize, ngrids, BLKSIZE*1200))\n if non0tab is None:\n non0tab = grids.non0tab\n if non0tab is None:\n non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE,cell.nbas),\n dtype=numpy.uint8)\n non0tab[:] = 0xff\n kpt = numpy.reshape(kpt, 3)\n if kpts_band is None:\n kpt1 = kpt2 = kpt\n else:\n kpt1 = kpts_band\n kpt2 = kpt\n\n for ip0 in range(0, ngrids, blksize):\n ip1 = min(ngrids, ip0+blksize)\n coords = grids_coords[ip0:ip1]\n weight = grids_weights[ip0:ip1]\n non0 = non0tab[ip0//BLKSIZE:]\n ao_k2 = self.eval_ao(cell, coords, kpt2, deriv=deriv, non0tab=non0)\n if abs(kpt1-kpt2).sum() < 1e-9:\n ao_k1 = ao_k2\n else:\n ao_k1 = self.eval_ao(cell, coords, kpt1, deriv=deriv)\n yield ao_k1, ao_k2, non0, weight, coords\n ao_k1 = ao_k2 = None\n\n def _gen_rho_evaluator(self, cell, dms, hermi=0):\n return numint.NumInt._gen_rho_evaluator(self, cell, dms, hermi)\n\n nr_rks_fxc = nr_rks_fxc\n nr_uks_fxc = nr_uks_fxc\n cache_xc_kernel = cache_xc_kernel\n get_rho = get_rho\n\n def rsh_and_hybrid_coeff(self, xc_code, spin=0):\n omega, alpha, hyb = numint.NumInt.rsh_and_hybrid_coeff(self, xc_code, spin)\n if abs(omega) > 1e-10:\n raise NotImplementedError\n return omega, alpha, 
hyb\n_NumInt = NumInt\n\n\nclass KNumInt(numint.NumInt):\n '''Generalization of pyscf's NumInt class for k-point sampling and\n periodic images.\n '''\n def __init__(self, kpts=numpy.zeros((1,3))):\n numint.NumInt.__init__(self)\n self.kpts = numpy.reshape(kpts, (-1,3))\n\n def eval_ao(self, cell, coords, kpts=numpy.zeros((1,3)), deriv=0, relativity=0,\n shls_slice=None, non0tab=None, out=None, verbose=None, **kwargs):\n return eval_ao_kpts(cell, coords, kpts, deriv,\n relativity, shls_slice, non0tab, out, verbose)\n\n @lib.with_doc(make_mask.__doc__)\n def make_mask(self, cell, coords, relativity=0, shls_slice=None,\n verbose=None):\n return make_mask(cell, coords, relativity, shls_slice, verbose)\n\n def eval_rho(self, cell, ao_kpts, dm_kpts, non0tab=None, xctype='LDA',\n hermi=0, verbose=None):\n '''Collocate the *real* density (opt. gradients) on the real-space grid.\n\n Args:\n cell : Mole or Cell object\n ao_kpts : (nkpts, ngrids, nao) ndarray\n AO values at each k-point\n dm_kpts: (nkpts, nao, nao) ndarray\n Density matrix at each k-point\n\n Returns:\n rhoR : (ngrids,) ndarray\n '''\n nkpts = len(ao_kpts)\n rhoR = 0\n for k in range(nkpts):\n rhoR += eval_rho(cell, ao_kpts[k], dm_kpts[k], non0tab, xctype,\n hermi, verbose)\n rhoR *= 1./nkpts\n return rhoR\n\n def eval_rho2(self, cell, ao_kpts, mo_coeff_kpts, mo_occ_kpts,\n non0tab=None, xctype='LDA', verbose=None):\n nkpts = len(ao_kpts)\n rhoR = 0\n for k in range(nkpts):\n rhoR += eval_rho2(cell, ao_kpts[k], mo_coeff_kpts[k],\n mo_occ_kpts[k], non0tab, xctype, verbose)\n rhoR *= 1./nkpts\n return rhoR\n\n def nr_vxc(self, cell, grids, xc_code, dms, spin=0, relativity=0, hermi=0,\n kpts=None, kpts_band=None, max_memory=2000, verbose=None):\n '''Evaluate RKS/UKS XC functional and potential matrix.\n See :func:`nr_rks` and :func:`nr_uks` for more details.\n '''\n if spin == 0:\n return self.nr_rks(cell, grids, xc_code, dms, hermi,\n kpts, kpts_band, max_memory, verbose)\n else:\n return self.nr_uks(cell, grids, xc_code, dms, hermi,\n kpts, kpts_band, max_memory, verbose)\n\n @lib.with_doc(nr_rks.__doc__)\n def nr_rks(self, cell, grids, xc_code, dms, hermi=0, kpts=None, kpts_band=None,\n max_memory=2000, verbose=None, **kwargs):\n if kpts is None:\n if 'kpt' in kwargs:\n sys.stderr.write('WARN: KNumInt.nr_rks function finds keyword '\n 'argument \"kpt\" and converts it to \"kpts\"\\n')\n kpts = kwargs['kpt']\n else:\n kpts = self.kpts\n kpts = kpts.reshape(-1,3)\n\n return nr_rks(self, cell, grids, xc_code, dms, 0, 0,\n hermi, kpts, kpts_band, max_memory, verbose)\n\n @lib.with_doc(nr_uks.__doc__)\n def nr_uks(self, cell, grids, xc_code, dms, hermi=0, kpts=None, kpts_band=None,\n max_memory=2000, verbose=None, **kwargs):\n if kpts is None:\n if 'kpt' in kwargs:\n sys.stderr.write('WARN: KNumInt.nr_uks function finds keyword '\n 'argument \"kpt\" and converts it to \"kpts\"\\n')\n kpts = kwargs['kpt']\n else:\n kpts = self.kpts\n kpts = kpts.reshape(-1,3)\n\n return nr_uks(self, cell, grids, xc_code, dms, 1, 0,\n hermi, kpts, kpts_band, max_memory, verbose)\n\n def eval_mat(self, cell, ao_kpts, weight, rho, vxc,\n non0tab=None, xctype='LDA', spin=0, verbose=None):\n nkpts = len(ao_kpts)\n nao = ao_kpts[0].shape[-1]\n dtype = numpy.result_type(*ao_kpts)\n mat = numpy.empty((nkpts,nao,nao), dtype=dtype)\n for k in range(nkpts):\n mat[k] = eval_mat(cell, ao_kpts[k], weight, rho, vxc,\n non0tab, xctype, spin, verbose)\n return mat\n\n def _fxc_mat(self, cell, ao_kpts, wv, non0tab, xctype, ao_loc):\n nkpts = len(ao_kpts)\n nao = 
ao_kpts[0].shape[-1]\n dtype = numpy.result_type(*ao_kpts)\n mat = numpy.empty((nkpts,nao,nao), dtype=dtype)\n for k in range(nkpts):\n mat[k] = _fxc_mat(cell, ao_kpts[k], wv, non0tab, xctype, ao_loc)\n return mat\n\n def block_loop(self, cell, grids, nao, deriv=0, kpts=numpy.zeros((1,3)),\n kpts_band=None, max_memory=2000, non0tab=None, blksize=None):\n '''Define this macro to loop over grids by blocks.\n '''\n if grids.coords is None:\n grids.build(with_non0tab=True)\n grids_coords = grids.coords\n grids_weights = grids.weights\n ngrids = grids_coords.shape[0]\n nkpts = len(kpts)\n comp = (deriv+1)*(deriv+2)*(deriv+3)//6\n# NOTE to index grids.non0tab, the blksize needs to be the integer multiplier of BLKSIZE\n if blksize is None:\n blksize = int(max_memory*1e6/(comp*2*nkpts*nao*16*BLKSIZE))*BLKSIZE\n blksize = max(BLKSIZE, min(blksize, ngrids, BLKSIZE*1200))\n if non0tab is None:\n non0tab = grids.non0tab\n if non0tab is None:\n non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE,cell.nbas),\n dtype=numpy.uint8)\n non0tab[:] = 0xff\n if kpts_band is not None:\n kpts_band = numpy.reshape(kpts_band, (-1,3))\n where = [member(k, kpts) for k in kpts_band]\n where = [k_id[0] if len(k_id)>0 else None for k_id in where]\n\n for ip0 in range(0, ngrids, blksize):\n ip1 = min(ngrids, ip0+blksize)\n coords = grids_coords[ip0:ip1]\n weight = grids_weights[ip0:ip1]\n non0 = non0tab[ip0//BLKSIZE:]\n ao_k1 = ao_k2 = self.eval_ao(cell, coords, kpts, deriv=deriv, non0tab=non0)\n if kpts_band is not None:\n ao_k1 = self.eval_ao(cell, coords, kpts_band, deriv=deriv, non0tab=non0)\n yield ao_k1, ao_k2, non0, weight, coords\n ao_k1 = ao_k2 = None\n\n def _gen_rho_evaluator(self, cell, dms, hermi=0):\n if getattr(dms, 'mo_coeff', None) is not None:\n mo_coeff = dms.mo_coeff\n mo_occ = dms.mo_occ\n if isinstance(dms[0], numpy.ndarray) and dms[0].ndim == 2:\n mo_coeff = [mo_coeff]\n mo_occ = [mo_occ]\n nao = cell.nao_nr()\n ndms = len(mo_occ)\n def make_rho(idm, ao, non0tab, xctype):\n return self.eval_rho2(cell, ao, mo_coeff[idm], mo_occ[idm],\n non0tab, xctype)\n else:\n if isinstance(dms[0], numpy.ndarray) and dms[0].ndim == 2:\n dms = [numpy.stack(dms)]\n #if not hermi:\n # Density (or response of density) is always real for DFT.\n # Symmetrizing DM for gamma point should not change the value of\n # density. However, when k-point is considered, unless dm and\n # dm.conj().transpose produce the same real part of density, the\n # symmetrization code below may be incorrect (proof is needed).\n # # dm.shape = (nkpts, nao, nao)\n # dms = [(dm+dm.conj().transpose(0,2,1))*.5 for dm in dms]\n nao = dms[0].shape[-1]\n ndms = len(dms)\n def make_rho(idm, ao_kpts, non0tab, xctype):\n return self.eval_rho(cell, ao_kpts, dms[idm], non0tab, xctype,\n hermi=hermi)\n return make_rho, ndms, nao\n\n nr_rks_fxc = nr_rks_fxc\n nr_uks_fxc = nr_uks_fxc\n cache_xc_kernel = cache_xc_kernel\n get_rho = get_rho\n\n def rsh_and_hybrid_coeff(self, xc_code, spin=0):\n omega, alpha, hyb = numint.NumInt.rsh_and_hybrid_coeff(self, xc_code, spin)\n if abs(omega) > 1e-10:\n raise NotImplementedError\n return omega, alpha, hyb\n_KNumInt = KNumInt\n", "#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\nimport numpy\nfrom pyscf import gto, scf\nfrom pyscf import dft\nfrom pyscf import lib\n\nmol = gto.Mole()\nmol.verbose = 0\nmol.output = None\nmol.atom = 'h 0 0 0; h 1 .5 0; h 0 4 1; h 1 0 .2'\nmol.basis = 'aug-ccpvdz'\nmol.build()\n#dm = scf.RHF(mol).run(conv_tol=1e-14).make_rdm1()\ndm = numpy.load(os.path.realpath(os.path.join(__file__, '..', 'dm_h4.npy')))\nmf = dft.RKS(mol)\nmf.grids.atom_grid = {\"H\": (50, 110)}\nmf.prune = None\nmf.grids.build(with_non0tab=False)\nnao = mol.nao_nr()\nao = dft.numint.eval_ao(mol, mf.grids.coords, deriv=1)\nrho = dft.numint.eval_rho(mol, ao, dm, xctype='GGA')\n\ndef tearDownModule():\n global mol, mf, ao, rho\n del mol, mf, ao, rho\n\ndef finger(a):\n w = numpy.cos(numpy.arange(a.size))\n return numpy.dot(w, a.ravel())\n\nclass KnownValues(unittest.TestCase):\n def test_parse_xc(self):\n hyb, fn_facs = dft.xcfun.parse_xc('.5*HF+.5*B3LYP,VWN*.5')\n self.assertAlmostEqual(hyb[0], .6, 12)\n self.assertEqual([x[0] for x in fn_facs], [0,6,16,3])\n self.assertTrue(numpy.allclose([x[1] for x in fn_facs],\n (0.04, 0.36, 0.405, 0.595)))\n hyb, fn_facs = dft.xcfun.parse_xc('HF,')\n self.assertEqual(hyb[0], 1)\n self.assertEqual(fn_facs, [])\n\n hyb, fn_facs = dft.libxc.parse_xc('B88 - SLATER')\n self.assertEqual(fn_facs, [(106, 1), (1, -1)])\n hyb, fn_facs = dft.libxc.parse_xc('B88 -SLATER*.5')\n self.assertEqual(fn_facs, [(106, 1), (1, -0.5)])\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*B3LYP+0.25*B3LYP')\n self.assertTrue(numpy.allclose(hyb, [.15, 0, 0]))\n hyb = dft.libxc.hybrid_coeff('0.5*B3LYP+0.25*B3LYP')\n self.assertAlmostEqual(hyb, .15, 12)\n\n hyb, fn_facs = dft.xcfun.parse_xc('CAM_B3LYP')\n self.assertTrue(numpy.allclose(hyb, [0.19, 0.65, 0.33]))\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.6*CAM_B3LYP+0.4*B3P86')\n self.assertTrue(numpy.allclose(hyb, [.08+0.19*.6, 0.65*.6, 0.33]))\n self.assertTrue(numpy.allclose(fn_facs,\n [(9, 0.6), (3, 0.19), (16, 0.486), (0, 0.032), (6, 0.288), (46, 0.324)]))\n rsh = dft.xcfun.rsh_coeff('0.6*CAM_B3LYP+0.4*B3P86')\n self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.4*B3P86+0.6*CAM_B3LYP')\n self.assertTrue(numpy.allclose(hyb, [.08+0.19*.6, 0.65*.6, 0.33]))\n self.assertTrue(numpy.allclose(fn_facs,\n [(0, 0.032), (6, 0.288), (46, 0.324), (3, 0.19), (9, 0.6), (16, 0.486)]))\n rsh = dft.xcfun.rsh_coeff('0.4*B3P86+0.6*CAM_B3LYP')\n self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF(0.3) + .8*HF + .22*LR_HF')\n self.assertEqual(hyb, [1.3, 1.02, 0.3])\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF + .22*LR_HF(0.3) + .8*HF')\n self.assertEqual(hyb, [1.3, 1.02, 0.3])\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF + .8*HF + .22*LR_HF(0.3)')\n self.assertEqual(hyb, [1.3, 1.02, 0.3])\n\n hyb, fn_facs = dft.xcfun.parse_xc('0.5*RSH(2.04;0.56;0.3) + 0.5*BP86')\n self.assertEqual(hyb, [1.3, 1.02, 0.3])\n 
self.assertEqual(fn_facs, [(6, 0.5), (46, 0.5)])\n\n self.assertRaises(ValueError, dft.xcfun.parse_xc, 'SR_HF(0.3) + LR_HF(.5)')\n self.assertRaises(ValueError, dft.xcfun.parse_xc, 'LR-HF(0.3) + SR-HF(.5)')\n\n hyb = dft.xcfun.hybrid_coeff('M05')\n self.assertAlmostEqual(hyb, 0.28, 9)\n\n hyb, fn_facs = dft.xcfun.parse_xc('APBE,')\n self.assertEqual(fn_facs[0][0], 58)\n\n hyb, fn_facs = dft.xcfun.parse_xc('VWN,')\n self.assertEqual(fn_facs, [(3, 1)])\n\n hyb, fn_facs = dft.xcfun.parse_xc('TF,')\n self.assertEqual(fn_facs, [(24, 1)])\n\n ref = [(0, 1), (3, 1)]\n self.assertEqual(dft.xcfun.parse_xc_name('LDA,VWN'), (0,3))\n self.assertEqual(dft.xcfun.parse_xc(('LDA','VWN'))[1], ref)\n self.assertEqual(dft.xcfun.parse_xc((0, 3))[1], ref)\n self.assertEqual(dft.xcfun.parse_xc('0, 3')[1], ref)\n self.assertEqual(dft.xcfun.parse_xc(3)[1], [(3,1)])\n\n #self.assertEqual(dft.xcfun.parse_xc('M11-L')[1], [(226,1),(75,1)])\n #self.assertEqual(dft.xcfun.parse_xc('M11L' )[1], [(226,1),(75,1)])\n #self.assertEqual(dft.xcfun.parse_xc('M11-L,M11L' )[1], [(226,1),(75,1)])\n #self.assertEqual(dft.xcfun.parse_xc('M11_L,M11-L')[1], [(226,1),(75,1)])\n #self.assertEqual(dft.xcfun.parse_xc('M11L,M11_L' )[1], [(226,1),(75,1)])\n\n #self.assertEqual(dft.xcfun.parse_xc('Xpbe,')[1], [(123,1)])\n #self.assertEqual(dft.xcfun.parse_xc('pbe,' )[1], [(101,1)])\n hyb, fn_facs = dft.xcfun.parse_xc('PBE*.4+LDA')\n self.assertEqual(fn_facs, [(5, 0.4), (4, 0.4), (0, 1)])\n hyb, fn_facs = dft.xcfun.parse_xc('PBE*.4+VWN')\n self.assertEqual(fn_facs, [(5, 0.4), (4, 0.4), (3, 1)])\n\n self.assertTrue (dft.xcfun.is_meta_gga('m05'))\n self.assertFalse(dft.xcfun.is_meta_gga('pbe0'))\n self.assertFalse(dft.xcfun.is_meta_gga('tf,'))\n self.assertFalse(dft.xcfun.is_meta_gga('vv10'))\n self.assertTrue (dft.xcfun.is_gga('PBE0'))\n self.assertFalse(dft.xcfun.is_gga('m05'))\n self.assertFalse(dft.xcfun.is_gga('tf,'))\n self.assertTrue (dft.xcfun.is_lda('tf,'))\n self.assertFalse(dft.xcfun.is_lda('vv10'))\n self.assertTrue (dft.xcfun.is_hybrid_xc('m05'))\n self.assertTrue (dft.xcfun.is_hybrid_xc('pbe0,'))\n self.assertFalse(dft.xcfun.is_hybrid_xc('m05,'))\n self.assertFalse(dft.xcfun.is_hybrid_xc('vv10'))\n self.assertTrue (dft.xcfun.is_hybrid_xc(('b3lyp',4,'vv10')))\n\n def test_nlc_coeff(self):\n self.assertEqual(dft.xcfun.nlc_coeff('vv10'), [5.9, 0.0093])\n\n def test_lda(self):\n e,v,f,k = dft.xcfun.eval_xc('lda,', rho[0][:3], deriv=3)\n self.assertAlmostEqual(lib.finger(e) , -0.4720562542635522, 8)\n self.assertAlmostEqual(lib.finger(v[0]), -0.6294083390180697, 8)\n self.assertAlmostEqual(lib.finger(f[0]), -1.1414693830969338, 8)\n self.assertAlmostEqual(lib.finger(k[0]), 4.1402447248393921, 8)\n\n e,v,f,k = dft.xcfun.eval_xc('lda,', [rho[0][:3]*.5]*2, spin=1, deriv=3)\n self.assertAlmostEqual(lib.finger(e) , -0.4720562542635522, 8)\n self.assertAlmostEqual(lib.finger(v[0].T[0]), -0.6294083390180697, 8)\n self.assertAlmostEqual(lib.finger(v[0].T[1]), -0.6294083390180697, 8)\n self.assertAlmostEqual(lib.finger(f[0].T[0]), -1.1414693830969338*2, 8)\n self.assertAlmostEqual(lib.finger(f[0].T[2]), -1.1414693830969338*2, 8)\n self.assertAlmostEqual(lib.finger(k[0].T[0]), 4.1402447248393921*4, 7)\n self.assertAlmostEqual(lib.finger(k[0].T[3]), 4.1402447248393921*4, 7)\n\n def test_lyp(self):\n e,v,f = dft.xcfun.eval_xc(',LYP', rho, deriv=3)[:3]\n self.assertAlmostEqual(numpy.dot(rho[0],e), -62.114576182676615, 8)\n self.assertAlmostEqual(numpy.dot(rho[0],v[0]),-81.771670866308455, 8)\n self.assertAlmostEqual(numpy.dot(rho[0],v[1]), 
27.485383255125743, 8)\n self.assertAlmostEqual(numpy.dot(rho[0],f[0]), 186.823806251777, 7)\n self.assertAlmostEqual(numpy.dot(rho[0],f[1]), -3391.2428894571085, 6)\n self.assertAlmostEqual(numpy.dot(rho[0],f[2]), 0, 9)\n\n def test_beckex(self):\n rho =(numpy.array([1. , 1., 0., 0.]).reshape(-1,1),\n numpy.array([ .8, 1., 0., 0.]).reshape(-1,1))\n e,v,f = dft.xcfun.eval_xc('b88,', rho, spin=1, deriv=3)[:3]\n self.assertAlmostEqual(lib.finger(e) ,-0.9061911523772116 , 9)\n self.assertAlmostEqual(lib.finger(v[0]),-1.8531364353196298 , 9)\n self.assertAlmostEqual(lib.finger(v[1]),-0.0018308066137967724, 9)\n self.assertAlmostEqual(lib.finger(f[0]),-0.21602284426026866 , 9)\n self.assertAlmostEqual(lib.finger(f[1]), 0.0072053520662545617, 9)\n self.assertAlmostEqual(lib.finger(f[2]), 0.0002275350850255538, 9)\n\n def test_m05x(self):\n rho =(numpy.array([1., 1., 0., 0., 0., 0.165 ]).reshape(-1,1),\n numpy.array([.8, 1., 0., 0., 0., 0.1050]).reshape(-1,1))\n test_ref = numpy.array([-1.57876583, -2.12127045,-2.11264351,-0.00315462,\n 0.00000000, -0.00444560, 3.45640232, 4.4349756])\n exc, vxc, fxc, kxc = dft.xcfun.eval_xc('m05,', rho, 1, deriv=3)\n self.assertAlmostEqual(float(exc)*1.8, test_ref[0], 5)\n self.assertAlmostEqual(abs(vxc[0]-test_ref[1:3]).max(), 0, 6)\n self.assertAlmostEqual(abs(vxc[1]-test_ref[3:6]).max(), 0, 6)\n self.assertAlmostEqual(abs(vxc[3]-test_ref[6:8]).max(), 0, 5)\n\n exc, vxc, fxc, kxc = dft.xcfun.eval_xc('m05,', rho[0], 0, deriv=3)\n self.assertAlmostEqual(float(exc), -0.5746231988116002, 5)\n self.assertAlmostEqual(float(vxc[0]), -0.8806121005703862, 6)\n self.assertAlmostEqual(float(vxc[1]), -0.0032300155406846756, 7)\n self.assertAlmostEqual(float(vxc[3]), 0.4474953100487698, 5)\n\n def test_camb3lyp(self):\n rho = numpy.array([1., 1., 0.1, 0.1]).reshape(-1,1)\n exc, vxc, fxc, kxc = dft.xcfun.eval_xc('camb3lyp', rho, 0, deriv=1)\n # FIXME, xcfun and libxc do not agree on camb3lyp\n # self.assertAlmostEqual(float(exc), -0.5752559666317147, 5)\n # self.assertAlmostEqual(float(vxc[0]), -0.7709812578936763, 5)\n # self.assertAlmostEqual(float(vxc[1]), -0.0029862221286189846, 7)\n\n self.assertEqual(dft.xcfun.rsh_coeff('camb3lyp'), (0.33, 0.65, -0.46))\n\n def test_define_xc(self):\n def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):\n exc = vxc = fxc = kxc = None\n return exc, vxc, fxc, kxc\n\n mf = dft.RKS(mol)\n ni = dft.xcfun.define_xc(mf._numint, eval_xc, 'GGA', hyb=0.2)\n ni = dft.xcfun.define_xc(mf._numint, 'b3lyp+vwn', 'GGA', hyb=0.2)\n self.assertRaises(ValueError, dft.xcfun.define_xc, mf._numint, 0.1)\n\n def test_vs_libxc_rks(self):\n ao = dft.numint.eval_ao(mol, mf.grids.coords[:200], deriv=2)\n rho = dft.numint.eval_rho(mol, ao, dm, xctype='MGGA')\n rhoa = rho[:,:200]\n def check(xc_code, deriv=3, e_place=9, v_place=9, f_place=9, k_place=9):\n exc0, vxc0, fxc0, kxc0 = dft.libxc.eval_xc(xc_code, rhoa, 0, deriv=deriv)\n exc1, vxc1, fxc1, kxc1 = dft.xcfun.eval_xc(xc_code, rhoa, 0, deriv=deriv)\n self.assertAlmostEqual(abs(exc0-exc1).max(), 0, e_place)\n if deriv > 0:\n for v0, v1 in zip(vxc0, vxc1):\n if v0 is not None and v1 is not None:\n self.assertAlmostEqual(abs(v0-v1).max(), 0, v_place)\n if deriv > 1:\n for f0, f1 in zip(fxc0, fxc1):\n if f0 is not None and f1 is not None:\n self.assertAlmostEqual(abs(f0-f1).max(), 0, f_place)\n if deriv > 2:\n for k0, k1 in zip(kxc0, kxc1):\n if k0 is not None and k1 is not None:\n self.assertAlmostEqual(abs(k0-k1).max(), 0, k_place)\n\n check('lda,')\n\n check('pw86,')\n check('pbe,', 
e_place=6, v_place=6, f_place=5, k_place=4)\n #?check('becke,')\n #?check('br,')\n #?check('LDAERF,')\n check('optx,')\n check('OPTXCORR,')\n check('RPBE,')\n check('TF,' )\n check('PW91,' , e_place=6, v_place=4, f_place=2, k_place=-1)\n check('m05,' , deriv=1, e_place=6, v_place=6)\n check('m052x,', deriv=1, e_place=6, v_place=6)\n check('m06,' , deriv=1, e_place=6, v_place=6)\n check('m062x,', deriv=1, e_place=6, v_place=6)\n check('m06l,' , deriv=1, e_place=6, v_place=6)\n check('TPSS,' , k_place=-4)\n #?check('REVTPSS,', deriv=1) # xcfun crash\n check('APBE,')\n check('BLOC,' , k_place=-5)\n check('PBEINT,', e_place=7, v_place=6, f_place=5, k_place=4)\n\n check(',vwn3')\n check(',vwn5')\n check(',pbe' , deriv=2)\n #?check(',br')\n #?check(',LDAERF')\n check(',lyp' , deriv=2)\n check(',SPBE' , deriv=2, e_place=1, v_place=1, f_place=0)\n check(',PW91' , deriv=2, f_place=3)\n check(',m052x', deriv=1)\n check(',m05' , deriv=1)\n check(',m06' , deriv=1)\n check(',m062x', deriv=1)\n check(',m06l' , deriv=1)\n check(',TPSS' , deriv=1, v_place=1)\n check(',REVTPSS', deriv=1, e_place=2, v_place=1)\n check(',p86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check(',APBE' , deriv=2)\n check(',PBEINT' , deriv=1)\n check(',TPSSLOC', deriv=1, e_place=1, v_place=0)\n\n #?check('br')\n check('revpbe', deriv=2, e_place=6, v_place=6, f_place=5)\n check('b97' , deriv=2, e_place=6, v_place=5, f_place=3)\n #?check('b97_1')\n #?check('b97_2')\n check('SVWN')\n check('BLYP' , deriv=2)\n check('BP86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('OLYP' , deriv=2)\n check('KT1' , deriv=1)\n check('KT2' , deriv=1)\n #?check('KT3')\n check('PBE0' , deriv=2, e_place=6, v_place=6, f_place=5)\n check('B3P86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('B3P86G' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('B3PW91' , deriv=2, f_place=4)\n check('B3PW91G', deriv=2, e_place=2, v_place=2, f_place=2)\n check('B3LYP' , deriv=2)\n check('B3LYP5' , deriv=2)\n check('B3LYPG' , deriv=2)\n check('O3LYP' , deriv=2)\n check('X3LYP' , deriv=2, e_place=7, v_place=5, f_place=2)\n check('CAMB3LYP', deriv=1)\n check('B97_1' , deriv=2, e_place=6, v_place=5, f_place=3)\n check('B97_2' , deriv=2, e_place=6, v_place=5, f_place=3)\n check('TPSSH' , deriv=1, v_place=1)\n\n def test_vs_libxc_uks(self):\n ao = dft.numint.eval_ao(mol, mf.grids.coords[:400], deriv=2)\n rho = dft.numint.eval_rho(mol, ao, dm, xctype='MGGA')\n rhoa = rho[:,:200]\n rhob = rhoa + rho[:,200:400]\n def check(xc_code, deriv=3, e_place=9, v_place=9, f_place=9, k_place=9):\n exc0, vxc0, fxc0, kxc0 = dft.libxc.eval_xc(xc_code, (rhoa, rhob), 1, deriv=deriv)\n exc1, vxc1, fxc1, kxc1 = dft.xcfun.eval_xc(xc_code, (rhoa, rhob), 1, deriv=deriv)\n self.assertAlmostEqual(abs(exc0-exc1).max(), 0, e_place)\n if deriv > 0:\n for v0, v1 in zip(vxc0, vxc1):\n if v0 is not None and v1 is not None:\n self.assertAlmostEqual(abs(v0-v1).max(), 0, v_place)\n if deriv > 1:\n for f0, f1 in zip(fxc0, fxc1):\n if f0 is not None and f1 is not None:\n self.assertAlmostEqual(abs(f0-f1).max(), 0, f_place)\n if deriv > 2 and kxc0 is not None:\n for k0, k1 in zip(kxc0, kxc1):\n if k0 is not None and k1 is not None:\n self.assertAlmostEqual(abs(k0-k1).max(), 0, k_place)\n\n check('lda,')\n\n check('pw86,')\n check('pbe,', e_place=6, v_place=6, f_place=5, k_place=4)\n #?check('becke,')\n #?check('br,')\n #?check('LDAERF,')\n check('optx,')\n check('OPTXCORR,')\n check('RPBE,')\n check('TF,' , e_place=0, v_place=-1, f_place=-2, k_place=-2)\n check('PW91,' , e_place=6, v_place=4, 
f_place=2, k_place=-1)\n check('m05,' , deriv=1, e_place=6, v_place=6)\n check('m052x,', deriv=1, e_place=6, v_place=6)\n check('m06,' , deriv=1, e_place=6, v_place=6)\n check('m062x,', deriv=1, e_place=6, v_place=6)\n check('m06l,' , deriv=1, e_place=6, v_place=6)\n check('TPSS,' , k_place=-4)\n #?check('REVTPSS,', deriv=1) # libxc crash\n check('APBE,')\n check('BLOC,' , k_place=-5)\n check('PBEINT,', e_place=7, v_place=6, f_place=5, k_place=4)\n\n check(',vwn3', e_place=2, v_place=1, f_place=1, k_place=0)\n check(',vwn5')\n check(',pbe' , deriv=2)\n #?check(',br')\n #?check(',LDAERF')\n check(',lyp' , deriv=2)\n check(',SPBE' , deriv=2, e_place=1, v_place=1, f_place=0)\n check(',PW91' , deriv=2, f_place=3)\n check(',m052x', deriv=1)\n check(',m05' , deriv=1)\n check(',m06' , deriv=1)\n check(',m062x', deriv=1)\n check(',m06l' , deriv=1)\n check(',TPSS' , deriv=1, v_place=1)\n check(',REVTPSS', deriv=1, e_place=2, v_place=1)\n check(',p86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check(',APBE' , deriv=2)\n check(',PBEINT' , deriv=1)\n check(',TPSSLOC', deriv=1, e_place=1, v_place=0)\n\n #?check('br')\n check('revpbe', deriv=2, e_place=6, v_place=6, f_place=5)\n check('b97' , deriv=2, e_place=6, v_place=5, f_place=3)\n #?check('b97_1')\n #?check('b97_2')\n check('SVWN')\n check('BLYP' , deriv=2)\n check('BP86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('OLYP' , deriv=2)\n check('KT1' , deriv=1)\n check('KT2' , deriv=1)\n #?check('KT3')\n check('PBE0' , deriv=2, e_place=6, v_place=6, f_place=5)\n check('B3P86' , deriv=2, e_place=5, v_place=5, f_place=3)\n check('B3P86G' , deriv=2, e_place=3, v_place=2, f_place=2)\n check('B3PW91' , deriv=2, f_place=4)\n check('B3PW91G', deriv=2, e_place=2, v_place=2, f_place=2)\n check('B3LYP' , deriv=2)\n check('B3LYP5' , deriv=2)\n check('B3LYPG' , deriv=2, e_place=3, v_place=2, f_place=2)\n check('O3LYP' , deriv=2, e_place=3, v_place=2, f_place=1)\n check('X3LYP' , deriv=2, e_place=7, v_place=5, f_place=2)\n check('CAMB3LYP', deriv=1)\n check('B97_1' , deriv=2, e_place=6, v_place=5, f_place=3)\n check('B97_2' , deriv=2, e_place=6, v_place=5, f_place=3)\n check('TPSSH' , deriv=1, v_place=1)\n\n\nif __name__ == \"__main__\":\n print(\"Test xcfun\")\n unittest.main()\n\n" ]
[ [ "numpy.sqrt", "numpy.zeros_like", "numpy.ones", "numpy.empty", "numpy.append", "numpy.zeros", "numpy.tril_indices", "numpy.asarray", "numpy.argsort", "numpy.ndarray.__array_wrap__", "numpy.einsum", "numpy.where" ], [ "numpy.empty", "numpy.zeros", "numpy.result_type", "numpy.stack", "numpy.reshape", "numpy.asarray", "numpy.hstack", "numpy.sqrt", "numpy.iscomplexobj", "numpy.array" ], [ "numpy.arange", "numpy.allclose", "numpy.dot", "numpy.array" ] ]
MISStingting/NMTmodel
[ "970115d6f9fcd015d7daf3ad0e4844055e2af5d3", "970115d6f9fcd015d7daf3ad0e4844055e2af5d3" ]
[ "NMT/dataset.py", "tests/dataset_test.py" ]
[ "import tensorflow as tf\n\n\n# tf.enable_eager_execution()\n\n\nclass Dataset(object):\n\n def get_dataset(self, params, mode):\n if mode == tf.estimator.ModeKeys.TRAIN:\n features_path = params[\"train_features_file\"]\n labels_path = params[\"train_labels_file\"]\n elif mode == tf.estimator.ModeKeys.EVAL:\n features_path = params[\"eval_features_file\"]\n labels_path = params[\"eval_labels_file\"]\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n features_path = params[\"test_features_file\"]\n labels_path = params[\"test_labels_file\"]\n else:\n raise ValueError(\"wrong mode!!!\")\n\n features_dataset, labels_dataset = self._load_dataset(features_path, labels_path, mode)\n if mode == tf.estimator.ModeKeys.PREDICT:\n dataset = features_dataset.map(lambda x: tf.string_split([x]).values)\n dataset = dataset.shuffle(buffer_size=params[\"buffer_size\"],\n reshuffle_each_iteration=params[\"reshuffle_each_iteration\"])\n dataset = dataset.prefetch(buffer_size=params[\"buffer_size\"])\n dataset = dataset.map(lambda src: (src, tf.size(src)))\n dataset = dataset.padded_batch(batch_size=params[\"batch_size\"],\n padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])),\n padding_values=(tf.constant(\"<blank>\"), 0))\n iterator = dataset.make_one_shot_iterator()\n src, src_len = iterator.get_next()\n features = {\n \"input\": src,\n \"input_length\": src_len\n }\n labels = None\n else:\n dataset = tf.data.Dataset.zip((features_dataset, labels_dataset))\n dataset = dataset.map(lambda x, y: (tf.string_split([x]).values, tf.string_split([y]).values))\n dataset = dataset.repeat(params[\"repeat\"]).shuffle(buffer_size=params[\"buffer_size\"],\n reshuffle_each_iteration=params[\n \"reshuffle_each_iteration\"])\n dataset = dataset.prefetch(buffer_size=params[\"buffer_size\"])\n if params[\"src_max_len\"] > 0:\n dataset = dataset.map(\n lambda src, tgt: (src[:params[\"src_max_len\"]], tgt))\n if params[\"tgt_max_len\"] > 0:\n dataset = dataset.map(\n lambda src, tgt: (src, tgt[:params[\"tgt_max_len\"]]))\n dataset = dataset.map(\n lambda src, tgt: (src,\n tf.concat(([\"<s>\"], tgt), 0),\n tf.concat((tgt, [\"</s>\"]), 0)),\n num_parallel_calls=params[\"num_parallel_calls\"])\n dataset = dataset.map(lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_out)))\n dataset = dataset.padded_batch(batch_size=params[\"batch_size\"],\n padded_shapes=(\n tf.TensorShape([None]),\n tf.TensorShape([None]),\n tf.TensorShape([None]),\n tf.TensorShape([]),\n tf.TensorShape([])),\n padding_values=(\n tf.constant(\"<blank>\", dtype=tf.string),\n tf.constant(\"<s>\", dtype=tf.string),\n tf.constant(\"</s>\", dtype=tf.string),\n 0,\n 0))\n iterator = dataset.make_one_shot_iterator()\n src, tgt_in, tgt_out, input_length, output_length = iterator.get_next()\n features = {\n \"input\": src,\n \"input_length\": input_length\n }\n labels = {\n \"output_in\": tgt_in,\n \"output_out\": tgt_out,\n \"output_length\": output_length\n }\n return features, labels\n\n @staticmethod\n def _load_dataset(features_path, labels_path, mode):\n ''' 从文件读取dataset\n :param mode:\n :return:\n '''\n if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n features_dataset = tf.data.TextLineDataset(filenames=features_path)\n labels_dataset = tf.data.TextLineDataset(filenames=labels_path)\n\n return features_dataset, labels_dataset\n elif mode == tf.estimator.ModeKeys.PREDICT:\n features_dataset = tf.data.TextLineDataset(filenames=features_path)\n return features_dataset, None\n\n\ndata_util = 
Dataset()\n", "import os\nimport yaml\nimport tensorflow as tf\nfrom NMTmodel.NMT.dataset import data_util\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\npar_dir = os.path.dirname(cur_dir)\n\n\nclass DatasetTest(tf.test.TestCase):\n def setUp(self):\n self.config_file = os.path.join(par_dir, \"config.yml\")\n\n def test_dataset(self):\n with tf.gfile.GFile(self.config_file, \"rb\") as f:\n params = yaml.load(stream=f.read(), Loader=yaml.FullLoader)\n data_util.get_dataset(params, mode=tf.estimator.ModeKeys.PREDICT)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.data.TextLineDataset", "tensorflow.size", "tensorflow.TensorShape", "tensorflow.concat", "tensorflow.data.Dataset.zip", "tensorflow.constant", "tensorflow.string_split" ], [ "tensorflow.gfile.GFile", "tensorflow.test.main" ] ]
EdisonLeeeee/GraphGallery
[ "4eec9c5136bda14809bd22584b26cc346cdb633b" ]
[ "graphgallery/nn/layers/tensorflow/dropout/dropout.py" ]
[ "import tensorflow as tf\r\nimport tensorflow.keras.backend as K\r\nfrom tensorflow.keras.layers import Layer, Dropout\r\n\r\n\r\nclass SparseDropout(Layer):\r\n def __init__(self, p=0.5):\r\n super().__init__()\r\n self.p = p\r\n\r\n def call(self, x, training=None):\r\n if training is None:\r\n training = K.learning_phase()\r\n\r\n if self.p and training:\r\n values = tf.nn.dropout(x.values, self.p)\r\n return tf.SparseTensor(x.indices, values, x.dense_shape)\r\n return x\r\n\r\n\r\nclass MixedDropout(Layer):\r\n def __init__(self, p=0.5):\r\n super().__init__()\r\n self.dense_dropout = Dropout(p)\r\n self.sparse_dropout = SparseDropout(p)\r\n\r\n def call(self, x):\r\n if K.is_sparse(x):\r\n return self.sparse_dropout(x)\r\n else:\r\n return self.dense_dropout(x)\r\n" ]
[ [ "tensorflow.keras.layers.Dropout", "tensorflow.keras.backend.is_sparse", "tensorflow.SparseTensor", "tensorflow.keras.backend.learning_phase", "tensorflow.nn.dropout" ] ]
ammaryasirnaich/mmdetection3d
[ "5e549546abbb2a7b43aab59e40e87599f61dcc4a" ]
[ "tests/test_data/test_datasets/test_kitti_dataset.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport os\nimport tempfile\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom mmdet3d.core.bbox import LiDARInstance3DBoxes, limit_period\nfrom mmdet3d.datasets import KittiDataset\n\n\ndef _generate_kitti_dataset_config():\n data_root = 'tests/data/kitti'\n ann_file = 'tests/data/kitti/kitti_infos_train.pkl'\n classes = ['Pedestrian', 'Cyclist', 'Car']\n pts_prefix = 'velodyne_reduced'\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(\n type='MultiScaleFlipAug3D',\n img_scale=(1333, 800),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0, 0],\n scale_ratio_range=[1.0, 1.0],\n translation_std=[0, 0, 0]),\n dict(type='RandomFlip3D'),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=classes,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ])\n ]\n modality = dict(use_lidar=True, use_camera=False)\n split = 'training'\n return data_root, ann_file, classes, pts_prefix, pipeline, modality, split\n\n\ndef _generate_kitti_multi_modality_dataset_config():\n data_root = 'tests/data/kitti'\n ann_file = 'tests/data/kitti/kitti_infos_train.pkl'\n classes = ['Pedestrian', 'Cyclist', 'Car']\n pts_prefix = 'velodyne_reduced'\n img_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug3D',\n img_scale=(1333, 800),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n dict(type='Resize', multiscale_mode='value', keep_ratio=True),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0, 0],\n scale_ratio_range=[1., 1.],\n translation_std=[0, 0, 0]),\n dict(type='RandomFlip3D'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=classes,\n with_label=False),\n dict(type='Collect3D', keys=['points', 'img'])\n ])\n ]\n modality = dict(use_lidar=True, use_camera=True)\n split = 'training'\n return data_root, ann_file, classes, pts_prefix, pipeline, modality, split\n\n\ndef test_getitem():\n np.random.seed(0)\n data_root, ann_file, classes, pts_prefix, \\\n _, modality, split = _generate_kitti_dataset_config()\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=True,\n with_label_3d=True,\n file_client_args=dict(backend='disk')),\n dict(\n type='ObjectSample',\n db_sampler=dict(\n data_root='tests/data/kitti/',\n # in coordinate system refactor, this test file is modified\n info_path='tests/data/kitti/kitti_dbinfos_train.pkl',\n rate=1.0,\n prepare=dict(\n filter_by_difficulty=[-1],\n filter_by_min_points=dict(Pedestrian=10)),\n classes=['Pedestrian', 'Cyclist', 'Car'],\n sample_groups=dict(Pedestrian=6))),\n dict(\n type='ObjectNoise',\n num_try=100,\n translation_std=[1.0, 1.0, 0.5],\n global_rot_range=[0.0, 0.0],\n rot_range=[-0.78539816, 0.78539816]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(\n 
type='GlobalRotScaleTrans',\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05]),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='ObjectRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(type='PointShuffle'),\n dict(\n type='DefaultFormatBundle3D',\n class_names=['Pedestrian', 'Cyclist', 'Car']),\n dict(\n type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])\n ]\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n data = kitti_dataset[0]\n points = data['points']._data\n gt_bboxes_3d = data['gt_bboxes_3d']._data\n gt_labels_3d = data['gt_labels_3d']._data\n expected_gt_bboxes_3d = torch.tensor(\n [[9.5081, -5.2269, -1.1370, 1.2288, 0.4915, 1.9353, 1.9988]])\n expected_gt_labels_3d = torch.tensor([0])\n rot_matrix = data['img_metas']._data['pcd_rotation']\n rot_angle = data['img_metas']._data['pcd_rotation_angle']\n horizontal_flip = data['img_metas']._data['pcd_horizontal_flip']\n vertical_flip = data['img_metas']._data['pcd_vertical_flip']\n expected_rot_matrix = torch.tensor([[0.8018, 0.5976, 0.0000],\n [-0.5976, 0.8018, 0.0000],\n [0.0000, 0.0000, 1.0000]])\n expected_rot_angle = 0.6404654291602163\n noise_angle = 0.20247319\n assert torch.allclose(expected_rot_matrix, rot_matrix, atol=1e-4)\n assert math.isclose(expected_rot_angle, rot_angle, abs_tol=1e-4)\n assert horizontal_flip is True\n assert vertical_flip is False\n\n # after coord system refactor\n expected_gt_bboxes_3d[:, :3] = \\\n expected_gt_bboxes_3d[:, :3] @ rot_matrix @ rot_matrix\n expected_gt_bboxes_3d[:, -1:] = -np.pi - expected_gt_bboxes_3d[:, -1:] \\\n + 2 * rot_angle - 2 * noise_angle\n expected_gt_bboxes_3d[:, -1:] = limit_period(\n expected_gt_bboxes_3d[:, -1:], period=np.pi * 2)\n assert points.shape == (780, 4)\n assert torch.allclose(\n gt_bboxes_3d.tensor, expected_gt_bboxes_3d, atol=1e-4)\n assert torch.all(gt_labels_3d == expected_gt_labels_3d)\n\n # test multi-modality KITTI dataset\n np.random.seed(0)\n point_cloud_range = [0, -40, -3, 70.4, 40, 1]\n img_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\n multi_modality_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4),\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),\n dict(\n type='Resize',\n img_scale=[(640, 192), (2560, 768)],\n multiscale_mode='range',\n keep_ratio=True),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05],\n translation_std=[0.2, 0.2, 0.2]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='PointShuffle'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle3D', class_names=classes),\n dict(\n type='Collect3D',\n keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']),\n ]\n modality = dict(use_lidar=True, use_camera=True)\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n multi_modality_pipeline, classes, modality)\n data = kitti_dataset[0]\n img = data['img']._data\n lidar2img = data['img_metas']._data['lidar2img']\n\n expected_lidar2img = np.array(\n [[6.02943726e+02, -7.07913330e+02, -1.22748432e+01, -1.70942719e+02],\n [1.76777252e+02, 
8.80879879e+00, -7.07936157e+02, -1.02568634e+02],\n [9.99984801e-01, -1.52826728e-03, -5.29071223e-03, -3.27567995e-01],\n [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])\n\n assert img.shape[:] == (3, 416, 1344)\n assert np.allclose(lidar2img, expected_lidar2img)\n\n\ndef test_evaluate():\n if not torch.cuda.is_available():\n pytest.skip('test requires GPU and torch+cuda')\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n boxes_3d = LiDARInstance3DBoxes(\n torch.tensor(\n [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))\n labels_3d = torch.tensor([\n 0,\n ])\n scores_3d = torch.tensor([0.5])\n metric = ['mAP']\n result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)\n ap_dict = kitti_dataset.evaluate([result], metric)\n assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_easy'],\n 3.0303030303030307)\n assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_moderate'],\n 3.0303030303030307)\n assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_hard'],\n 3.0303030303030307)\n\n\ndef test_show():\n from os import path as osp\n\n import mmcv\n\n from mmdet3d.core.bbox import LiDARInstance3DBoxes\n tmp_dir = tempfile.TemporaryDirectory()\n temp_dir = tmp_dir.name\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(\n data_root, ann_file, split=split, modality=modality, pipeline=pipeline)\n boxes_3d = LiDARInstance3DBoxes(\n torch.tensor(\n [[46.1218, -4.6496, -0.9275, 0.5316, 1.4442, 1.7450, 1.1749],\n [33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723],\n [46.1366, -4.6404, -0.9510, 0.5162, 1.6501, 1.7540, 1.3778],\n [33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.5430],\n [58.9079, 16.6272, -1.5829, 1.5656, 3.9313, 1.4899, 1.5505]]))\n scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.2780])\n labels_3d = torch.tensor([0, 0, 1, 1, 2])\n result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)\n results = [result]\n kitti_dataset.show(results, temp_dir, show=False)\n pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')\n gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')\n pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')\n mmcv.check_file_exist(pts_file_path)\n mmcv.check_file_exist(gt_file_path)\n mmcv.check_file_exist(pred_file_path)\n tmp_dir.cleanup()\n\n # test show with pipeline\n eval_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4),\n dict(\n type='DefaultFormatBundle3D',\n class_names=classes,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n tmp_dir = tempfile.TemporaryDirectory()\n temp_dir = tmp_dir.name\n kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)\n pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')\n gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')\n pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')\n mmcv.check_file_exist(pts_file_path)\n mmcv.check_file_exist(gt_file_path)\n mmcv.check_file_exist(pred_file_path)\n tmp_dir.cleanup()\n\n # test multi-modality show\n tmp_dir = tempfile.TemporaryDirectory()\n temp_dir = tmp_dir.name\n _, _, _, _, multi_modality_pipeline, modality, _ = \\\n _generate_kitti_multi_modality_dataset_config()\n kitti_dataset = 
KittiDataset(data_root, ann_file, split, pts_prefix,\n multi_modality_pipeline, classes, modality)\n kitti_dataset.show(results, temp_dir, show=False)\n pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')\n gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')\n pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')\n img_file_path = osp.join(temp_dir, '000000', '000000_img.png')\n img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')\n img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')\n mmcv.check_file_exist(pts_file_path)\n mmcv.check_file_exist(gt_file_path)\n mmcv.check_file_exist(pred_file_path)\n mmcv.check_file_exist(img_file_path)\n mmcv.check_file_exist(img_pred_path)\n mmcv.check_file_exist(img_gt_file)\n tmp_dir.cleanup()\n\n # test multi-modality show with pipeline\n eval_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4),\n dict(type='LoadImageFromFile'),\n dict(\n type='DefaultFormatBundle3D',\n class_names=classes,\n with_label=False),\n dict(type='Collect3D', keys=['points', 'img'])\n ]\n tmp_dir = tempfile.TemporaryDirectory()\n temp_dir = tmp_dir.name\n kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)\n pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')\n gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')\n pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')\n img_file_path = osp.join(temp_dir, '000000', '000000_img.png')\n img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')\n img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')\n mmcv.check_file_exist(pts_file_path)\n mmcv.check_file_exist(gt_file_path)\n mmcv.check_file_exist(pred_file_path)\n mmcv.check_file_exist(img_file_path)\n mmcv.check_file_exist(img_pred_path)\n mmcv.check_file_exist(img_gt_file)\n tmp_dir.cleanup()\n\n\ndef test_format_results():\n from mmdet3d.core.bbox import LiDARInstance3DBoxes\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n # coord system refactor\n boxes_3d = LiDARInstance3DBoxes(\n torch.tensor(\n [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))\n labels_3d = torch.tensor([\n 0,\n ])\n scores_3d = torch.tensor([0.5])\n result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)\n results = [result]\n result_files, tmp_dir = kitti_dataset.format_results(results)\n expected_name = np.array(['Pedestrian'])\n expected_truncated = np.array([0.])\n expected_occluded = np.array([0])\n # coord sys refactor\n expected_alpha = np.array(-3.3410306 + np.pi)\n expected_bbox = np.array([[710.443, 144.00221, 820.29114, 307.58667]])\n expected_dimensions = np.array([[1.2, 1.89, 0.48]])\n expected_location = np.array([[1.8399826, 1.4700007, 8.410018]])\n expected_rotation_y = np.array([0.0100])\n expected_score = np.array([0.5])\n expected_sample_idx = np.array([0])\n assert np.all(result_files[0]['name'] == expected_name)\n assert np.allclose(result_files[0]['truncated'], expected_truncated)\n assert np.all(result_files[0]['occluded'] == expected_occluded)\n assert np.allclose(result_files[0]['alpha'], expected_alpha, 1e-3)\n assert np.allclose(result_files[0]['bbox'], expected_bbox)\n assert np.allclose(result_files[0]['dimensions'], expected_dimensions)\n assert np.allclose(result_files[0]['location'], expected_location)\n assert 
np.allclose(result_files[0]['rotation_y'], expected_rotation_y,\n 1e-3)\n assert np.allclose(result_files[0]['score'], expected_score)\n assert np.allclose(result_files[0]['sample_idx'], expected_sample_idx)\n tmp_dir.cleanup()\n\n\ndef test_bbox2result_kitti():\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n boxes_3d = LiDARInstance3DBoxes(\n torch.tensor(\n [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))\n labels_3d = torch.tensor([\n 0,\n ])\n scores_3d = torch.tensor([0.5])\n result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)\n results = [result]\n tmp_dir = tempfile.TemporaryDirectory()\n temp_kitti_result_dir = tmp_dir.name\n det_annos = kitti_dataset.bbox2result_kitti(\n results, classes, submission_prefix=temp_kitti_result_dir)\n expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')\n expected_name = np.array(['Pedestrian'])\n expected_dimensions = np.array([1.2000, 1.8900, 0.4800])\n # coord system refactor (reverse sign)\n expected_rotation_y = 0.0100\n expected_score = np.array([0.5])\n assert np.all(det_annos[0]['name'] == expected_name)\n assert np.allclose(det_annos[0]['rotation_y'], expected_rotation_y, 1e-3)\n assert np.allclose(det_annos[0]['score'], expected_score)\n assert np.allclose(det_annos[0]['dimensions'], expected_dimensions)\n assert os.path.exists(expected_file_path)\n tmp_dir.cleanup()\n\n tmp_dir = tempfile.TemporaryDirectory()\n temp_kitti_result_dir = tmp_dir.name\n boxes_3d = LiDARInstance3DBoxes(torch.tensor([]))\n labels_3d = torch.tensor([])\n scores_3d = torch.tensor([])\n empty_result = dict(\n boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)\n results = [empty_result]\n det_annos = kitti_dataset.bbox2result_kitti(\n results, classes, submission_prefix=temp_kitti_result_dir)\n expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')\n assert os.path.exists(expected_file_path)\n tmp_dir.cleanup()\n\n\ndef test_bbox2result_kitti2d():\n data_root, ann_file, classes, pts_prefix, \\\n pipeline, modality, split = _generate_kitti_dataset_config()\n kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,\n pipeline, classes, modality)\n bboxes = np.array([[[46.1218, -4.6496, -0.9275, 0.5316, 0.5],\n [33.3189, 0.1981, 0.3136, 0.5656, 0.5]],\n [[46.1366, -4.6404, -0.9510, 0.5162, 0.5],\n [33.2646, 0.2297, 0.3446, 0.5746, 0.5]]])\n det_annos = kitti_dataset.bbox2result_kitti2d([bboxes], classes)\n expected_name = np.array(\n ['Pedestrian', 'Pedestrian', 'Cyclist', 'Cyclist'])\n expected_bbox = np.array([[46.1218, -4.6496, -0.9275, 0.5316],\n [33.3189, 0.1981, 0.3136, 0.5656],\n [46.1366, -4.6404, -0.951, 0.5162],\n [33.2646, 0.2297, 0.3446, 0.5746]])\n expected_score = np.array([0.5, 0.5, 0.5, 0.5])\n assert np.all(det_annos[0]['name'] == expected_name)\n assert np.allclose(det_annos[0]['bbox'], expected_bbox)\n assert np.allclose(det_annos[0]['score'], expected_score)\n" ]
[ [ "numpy.allclose", "torch.tensor", "numpy.random.seed", "numpy.isclose", "torch.all", "numpy.all", "torch.cuda.is_available", "numpy.array", "torch.allclose" ] ]
millingermarkus/pypsa-eur
[ "2e39a21299036c0cec86fe4707de06a42ec15d62" ]
[ "scripts/build_load_data.py" ]
[ "# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors\n#\n# SPDX-License-Identifier: MIT\n\n\"\"\"\n\nThis rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file.\n\nRelevant Settings\n-----------------\n\n.. code:: yaml\n\n snapshots:\n\n load:\n interpolate_limit:\n time_shift_for_large_gaps:\n manual_adjustments:\n\n\n.. seealso::\n Documentation of the configuration file ``config.yaml`` at\n :ref:`load_cf`\n\nInputs\n------\n\n\nOutputs\n-------\n\n- ``resource/time_series_60min_singleindex_filtered.csv``:\n\n\n\"\"\"\n\nimport logging\nlogger = logging.getLogger(__name__)\nfrom _helpers import configure_logging\n\nimport pandas as pd\nimport numpy as np\nimport dateutil\nfrom pandas import Timedelta as Delta\n\n\ndef load_timeseries(fn, years, countries, powerstatistics=True):\n \"\"\"\n Read load data from OPSD time-series package version 2020-10-06.\n\n Parameters\n ----------\n years : None or slice()\n Years for which to read load data (defaults to\n slice(\"2018\",\"2019\"))\n fn : str\n File name or url location (file format .csv)\n countries : listlike\n Countries for which to read load data.\n powerstatistics: bool\n Whether the electricity consumption data of the ENTSOE power\n statistics (if true) or of the ENTSOE transparency map (if false)\n should be parsed.\n\n Returns\n -------\n load : pd.DataFrame\n Load time-series with UTC timestamps x ISO-2 countries\n \"\"\"\n logger.info(f\"Retrieving load data from '{fn}'.\")\n\n pattern = 'power_statistics' if powerstatistics else '_transparency'\n pattern = f'_load_actual_entsoe_{pattern}'\n rename = lambda s: s[:-len(pattern)]\n date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)\n return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)\n .filter(like=pattern)\n .rename(columns=rename)\n .dropna(how=\"all\", axis=0)\n .rename(columns={'GB_UKM' : 'GB'})\n .filter(items=countries)\n .loc[years])\n\n\ndef consecutive_nans(ds):\n return (ds.isnull().astype(int)\n .groupby(ds.notnull().astype(int).cumsum()[ds.isnull()])\n .transform('sum').fillna(0))\n\n\ndef fill_large_gaps(ds, shift):\n \"\"\"\n Fill up large gaps with load data from the previous week.\n\n This function fills gaps ragning from 3 to 168 hours (one week).\n \"\"\"\n shift = Delta(shift)\n nhours = shift / np.timedelta64(1, 'h')\n if (consecutive_nans(ds) > nhours).any():\n logger.warning('There exist gaps larger then the time shift used for '\n 'copying time slices.')\n time_shift = pd.Series(ds.values, ds.index + shift)\n return ds.where(ds.notnull(), time_shift.reindex_like(ds))\n\n\ndef nan_statistics(df):\n def max_consecutive_nans(ds):\n return (ds.isnull().astype(int)\n .groupby(ds.notnull().astype(int).cumsum())\n .sum().max())\n consecutive = df.apply(max_consecutive_nans)\n total = df.isnull().sum()\n max_total_per_month = df.isnull().resample('m').sum().max()\n return pd.concat([total, consecutive, max_total_per_month],\n keys=['total', 'consecutive', 'max_total_per_month'], axis=1)\n\n\ndef copy_timeslice(load, cntry, start, stop, delta):\n start = pd.Timestamp(start)\n stop = pd.Timestamp(stop)\n if start-delta in load.index and stop in 
load.index and cntry in load:\n load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values\n\n\ndef manual_adjustment(load, powerstatistics):\n \"\"\"\n Adjust gaps manual for load data from OPSD time-series package.\n\n 1. For the ENTSOE power statistics load data (if powerstatistics is True)\n\n Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the\n same load curve as Serbia and Albania the same as Macdedonia, both scaled\n by the corresponding ratio of total energy consumptions reported by\n IEA Data browser [0] for the year 2013.\n\n 2. For the ENTSOE transparency load data (if powerstatistics is False)\n\n Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the\n same load curve as Montenegro, scaled by the corresponding ratio of total energy\n consumptions reported by IEA Data browser [0] for the year 2016.\n\n [0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons\n\n\n Parameters\n ----------\n load : pd.DataFrame\n Load time-series with UTC timestamps x ISO-2 countries\n powerstatistics: bool\n Whether argument load comprises the electricity consumption data of\n the ENTSOE power statistics or of the ENTSOE transparency map\n\n Returns\n -------\n load : pd.DataFrame\n Manual adjusted and interpolated load time-series with UTC\n timestamps x ISO-2 countries\n \"\"\"\n\n if powerstatistics:\n if 'MK' in load.columns:\n if 'AL' not in load.columns or load.AL.isnull().values.all():\n load['AL'] = load['MK'] * (4.1 / 7.4)\n if 'RS' in load.columns:\n if 'KV' not in load.columns or load.KV.isnull().values.all():\n load['KV'] = load['RS'] * (4.8 / 27.)\n\n copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1))\n copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2))\n copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1))\n copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1))\n # is a WE, so take WE before\n copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1))\n copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1))\n copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1))\n # whole january missing\n copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364))\n\n else:\n if 'ME' in load:\n if 'AL' not in load and 'AL' in countries:\n load['AL'] = load.ME * (5.7/2.9)\n if 'MK' not in load and 'MK' in countries:\n load['MK'] = load.ME * (6.7/2.9)\n copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1))\n\n return load\n\n\nif __name__ == \"__main__\":\n\n if 'snakemake' not in globals():\n from _helpers import mock_snakemake\n snakemake = mock_snakemake('build_load_data')\n\n configure_logging(snakemake)\n\n config = snakemake.config\n powerstatistics = config['load']['power_statistics']\n interpolate_limit = config['load']['interpolate_limit']\n countries = config['countries']\n snapshots = pd.date_range(freq='h', **config['snapshots'])\n years = slice(snapshots[0], snapshots[-1])\n time_shift = config['load']['time_shift_for_large_gaps']\n\n load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)\n\n if config['load']['manual_adjustments']:\n load = manual_adjustment(load, powerstatistics)\n\n logger.info(f\"Linearly interpolate gaps of size {interpolate_limit} and less.\")\n load = 
load.interpolate(method='linear', limit=interpolate_limit)\n\n logger.info(\"Filling larger gaps by copying time-slices of period \"\n f\"'{time_shift}'.\")\n load = load.apply(fill_large_gaps, shift=time_shift)\n\n assert not load.isna().any().any(), (\n 'Load data contains nans. Adjust the parameters '\n '`time_shift_for_large_gaps` or modify the `manual_adjustment` function '\n 'for implementing the needed load data modifications.')\n\n load.to_csv(snakemake.output[0])\n\n" ]
[ [ "pandas.Series", "pandas.date_range", "numpy.timedelta64", "pandas.read_csv", "pandas.Timedelta", "pandas.concat", "pandas.Timestamp" ] ]
daiki-kimura/commonsense-rl
[ "5513926957b6501ce9cfa46f77f8f2c1c4892fa5" ]
[ "utils_twc/kg.py" ]
[ "import sys\nimport networkx as nx\nimport logging\nimport json\nimport requests\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom utils_twc.generic import escape_entities\n\n# Logging formatting\nFORMAT = '%(asctime)s %(message)s'\nlogging.basicConfig(format=FORMAT, level='INFO', stream=sys.stdout)\nkg = {}\nsource_paths= defaultdict(dict)\n\n\ndef shortest_path_subgraph(kg_graph, prev_graph, nodes, inventory_entities=None, command_entities=None, path_len=2, add_all_path=False):\n if inventory_entities is None:\n inventory_entities = []\n if command_entities is None:\n command_entities = []\n # Get non-neighbor nodes: nodes without edges between them\n world_graph = kg_graph.subgraph(list(prev_graph.nodes)+nodes).copy()\n world_graph = nx.compose(prev_graph,world_graph)\n world_graph.remove_edges_from(nx.selfloop_edges(world_graph))\n\n if path_len < 2:\n return world_graph\n triplets = []\n # Add command related relations\n pruned_entities = list(set(command_entities)-set(inventory_entities))\n if pruned_entities:\n for src_et in inventory_entities:\n for tgt_et in pruned_entities:\n if src_et != tgt_et:\n try:\n pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)\n except nx.NetworkXNoPath:\n pair_dist = 0\n if pair_dist >= 1 and pair_dist <= path_len:\n triplets.append([src_et, tgt_et, 'relatedTo'])\n else: # no items in the pruned entities, won't happen\n for entities in command_entities:\n for src_et in entities:\n for tgt_et in entities:\n if src_et != tgt_et:\n try:\n pair_dist = nx.shortest_path_length(kg_graph, source=src_et, target=tgt_et)\n except nx.NetworkXNoPath:\n pair_dist=0\n if pair_dist >= 1 and pair_dist <= path_len:\n triplets.append([src_et, tgt_et, 'relatedTo'])\n world_graph, _= add_triplets_to_graph(world_graph, triplets)\n return world_graph\n\n\ndef construct_graph(triplets):\n graph = nx.DiGraph()\n entities = {}\n for [e1, e2, r] in triplets:\n e1 = e1.lower().strip()\n e2 = e2.lower().strip()\n r = r.lower().strip()\n if e1 not in entities:\n graph.add_node(e1)\n entities[e1] = e1\n if e2 not in entities:\n graph.add_node(e2)\n entities[e2] = e2\n # Add Edge information\n if graph.has_edge(e1, e2):\n if r not in graph.edges[e1, e2]['relation']:\n graph.edges[e1, e2]['relation'] += ' ' + r\n else:\n graph.add_edge(e1, e2, relation=r)\n return graph, entities\n\n\ndef add_triplets_to_graph(graph, triplets):\n entities = dict(graph.nodes.data())\n for [e1, e2, r] in triplets:\n e1 = e1.lower().strip()\n e2 = e2.lower().strip()\n r = r.lower().strip()\n if e1 not in entities:\n graph.add_node(e1)\n entities[e1] = e1\n if e2 not in entities:\n graph.add_node(e2)\n entities[e2] = e2\n # Add Edge information\n if graph.has_edge(e1, e2):\n if r not in graph.edges[e1, e2]['relation']:\n graph.edges[e1, e2]['relation'] += ' ' + r\n else:\n graph.add_edge(e1, e2, relation=r)\n return graph, entities\n\n\ndef draw_graph(graph, title=\"cleanup\", show_relation=True, weights=None, pos=None):\n if not pos:\n pos = nx.spring_layout(graph, k=0.95)\n if weights:\n nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color=weights.tolist(),\n vmin=np.min(weights), vmax=np.max(weights), node_shape='o', alpha=0.9, font_size=8, with_labels=True,\n label=title,cmap='Blues')\n else:\n nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000, node_color='pink',\n node_shape='o', alpha=0.9, font_size=8, with_labels=True, label=title)\n 
if show_relation:\n p_edge = nx.draw_networkx_edge_labels(graph, pos, font_size=6, font_color='red',\n edge_labels=nx.get_edge_attributes(graph, 'relation'))\n\n\ndef draw_graph_colormap(graph,node_weights, showbar=False, cmap='YlGnBu'):\n # node_weights: maps node id/name to attention weights\n pos = nx.spring_layout(graph, k=0.95)\n weights = []\n for node in graph.nodes:\n weights.append(node_weights[node])\n # cmap = plt.cm.YlGnBu#RdBu\n cmap = plt.get_cmap(cmap)\n vmin = np.min(weights)\n vmax = np.max(weights)\n nx.draw(graph, pos, edge_color='black', width=1, linewidths=1, node_size=1000,\n node_color=weights, vmin=vmin, vmax=vmax, cmap=cmap,\n node_shape='o', alpha=0.9, font_size=8, with_labels=True, label='Attention')\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))\n sm._A = []\n if showbar:\n plt.colorbar(sm)\n plt.show()\n\n\ndef construct_kg(filename: str, print_every=1e6, cache_load=True, logger=logging.getLogger(__name__)) -> (nx.DiGraph, list, set):\n # access edges with graph.edges.data('relation')\n if 'graph' in kg and cache_load:\n return kg['graph'], kg['triplets'], kg['entities']\n\n path = Path(filename)\n if not path.exists():\n filename = './kg/conceptnet/kg.txt'\n\n triplets = []\n with open(filename, 'r') as fp:\n for idx, line in enumerate(fp):\n e1, r, e2 = line.rstrip(\"\\n\").rsplit()\n triplets.append([e1.lower().strip(), e2.lower().strip(), r.lower().strip()])\n if idx % print_every == 0:\n print(\"*\",end='')\n [graph, entities] = construct_graph(triplets)\n graph = graph.to_undirected(as_view=True) # Since a->b ==> b->a\n if cache_load:\n kg['graph'] = graph\n kg['triplets'] = triplets\n kg['entities'] = entities\n return graph, triplets, entities\n\n\nclass RelationExtractor:\n def __init__(self, tokenizer, openie_url=\"http://localhost:9000/\"):\n \"\"\"\n :param tokenizer:\n :param openie_url: server url for Stanford Core NLPOpen IE\n \"\"\"\n self.tokenizer = tokenizer\n self.openie_url = openie_url\n self.kg_vocab = {}\n self.agent_loc = ''\n\n def call_stanford_openie(self,sentence):\n querystring = {\n \"properties\": \"%7B%22annotators%22%3A%20%22openie%22%7D\",\n \"pipelineLanguage\": \"en\"}\n response = requests.request(\"POST\", self.openie_url, data=sentence, params=querystring)\n response = json.JSONDecoder().decode(response.text)\n return response\n\n def fetch_triplets(self,text, current_graph, prev_action=None):\n triplets = []\n remove = []\n prev_remove = []\n link = []\n c_id = len(self.kg_vocab.keys())\n obs = self.tokenizer.clean_string(text, preprocess=True)\n dirs = ['north', 'south', 'east', 'west']\n obs = str(obs)\n doc = self.tokenizer.nlp_eval(obs)\n sents = {}\n try:\n sents = self.call_stanford_openie(doc.text)['sentences']\n except:\n print(\"Error in connecting to Stanford CoreNLP OpenIE Server\")\n for ov in sents:\n tokens = ov[\"tokens\"]\n triple = ov['openie']\n for tr in triple:\n h, r, t = tr['subject'].lower(), tr['relation'].lower(), tr['object'].lower()\n if h == 'we':\n h = 'you'\n if r == 'are in':\n r = \"'ve entered\"\n\n if h == 'it':\n break\n triplets.append((h, r, t))\n\n room = \"\"\n room_set = False\n for rule in triplets:\n h, r, t = rule\n if 'entered' in r or 'are in' in r or 'walked' in r:\n prev_remove.append(r)\n if not room_set:\n room = t\n room_set = True\n if 'should' in r:\n prev_remove.append(r)\n if 'see' in r or 'make out' in r:\n link.append((r, t))\n remove.append(r)\n # else:\n # link.append((r, t))\n\n prev_room = self.agent_loc\n self.agent_loc = 
room\n add_rules = []\n if prev_action is not None:\n for d in dirs:\n if d in prev_action and room != \"\":\n add_rules.append((prev_room, d + ' of', room))\n prev_room_subgraph = None\n prev_you_subgraph = None\n\n for sent in doc.sents:\n sent = sent.text\n if sent == ',' or sent == 'hm .':\n continue\n if 'exit' in sent or 'entranceway' in sent:\n for d in dirs:\n if d in sent:\n triplets.append((room, 'has', 'exit to ' + d))\n if prev_room != \"\":\n graph_copy = current_graph.copy()\n graph_copy.remove_edge('you', prev_room)\n con_cs = [graph_copy.subgraph(c) for c in nx.weakly_connected_components(graph_copy)]\n\n for con_c in con_cs:\n if prev_room in con_c.nodes:\n prev_room_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)\n if 'you' in con_c.nodes:\n prev_you_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)\n\n for l in link:\n add_rules.append((room, l[0], l[1]))\n\n for rule in triplets:\n h, r, t = rule\n if r == 'is in':\n if t == 'room':\n t = room\n if r not in remove:\n add_rules.append((h, r, t))\n edges = list(current_graph.edges)\n for edge in edges:\n r = 'relatedTo'\n if 'relation' in current_graph[edge[0]][edge[1]]:\n r = current_graph[edge[0]][edge[1]]['relation']\n if r in prev_remove:\n current_graph.remove_edge(*edge)\n\n if prev_you_subgraph is not None:\n current_graph.remove_edges_from(prev_you_subgraph.edges)\n\n for rule in add_rules:\n u = '_'.join(str(rule[0]).split())\n v = '_'.join(str(rule[2]).split())\n if u != 'it' and u not in self.kg_vocab:\n self.kg_vocab[u] = c_id\n c_id += 1\n if v != 'it' and v not in self.kg_vocab:\n self.kg_vocab[v] = c_id\n c_id += 1\n skip_flag = False\n for skip_token in self.tokenizer.ignore_list:\n if skip_token in u or skip_token in v:\n skip_flag = True\n if u != 'it' and v != 'it' and not skip_flag:\n r = str(rule[1]).lower()\n if not rule[1] or rule[1] == '':\n r = 'relatedTo'\n current_graph.add_edge(str(rule[0]).lower(), str(rule[2]).lower(), relation=r)\n prev_edges = current_graph.edges\n if prev_room_subgraph is not None:\n current_graph.add_edges_from(prev_room_subgraph.edges)\n current_edges = current_graph.edges\n return current_graph, add_rules\n\n\ndef khop_neighbor_graph(graph, entities, cutoff=1, max_khop_degree=None):\n all_entities = []\n for et in entities:\n candidates = nx.single_source_shortest_path(graph, et, cutoff=cutoff).keys()\n if not max_khop_degree or len(candidates)<=max_khop_degree:\n all_entities.extend(list(candidates))\n return graph.subgraph(set(entities)|set(all_entities))\n\n\ndef ego_graph_seed_expansion(graph, seed, radius, undirected=True, max_degree=None):\n working_graph = graph\n if undirected:\n working_graph = graph.to_undirected()\n marked = set(seed)\n nodes = set(seed)\n\n for _ in range(radius):\n border = set()\n for node in marked:\n neighbors = {n for n in working_graph[node]}\n if max_degree is None or len(neighbors) <= max_degree:\n border |= neighbors\n nodes |= border\n marked = border\n\n return graph.subgraph(nodes)\n\n\ndef shortest_path_seed_expansion(graph, seed, cutoff=None, undirected=True, keep_all=True):\n nodes = set(seed)\n seed = list(seed)\n\n working_graph = graph\n if undirected:\n working_graph = graph.to_undirected()\n for i in range(len(seed)):\n start = i + 1 if undirected else 0\n for j in range(start, len(seed)):\n try:\n if not keep_all:\n path = nx.shortest_path(working_graph, seed[i], seed[j])\n if cutoff is None or len(path) <= cutoff:\n nodes |= set(path)\n else:\n paths = nx.all_shortest_paths(working_graph, seed[i], 
seed[j])\n for p in paths:\n if cutoff is None or len(p) <= cutoff:\n nodes |= set(p)\n except nx.NetworkXNoPath:\n continue\n return graph.subgraph(nodes)\n\n\ndef load_manual_graphs(path):\n path = Path(path)\n manual_world_graphs = {}\n if not path.exists():\n print('None Found.')\n return manual_world_graphs\n\n files = path.rglob(\"conceptnet_manual_subgraph-*.tsv\")\n for file in files:\n game_id = str(file).split('-')[-1].split('.')[0]\n graph, triplets, entities = construct_kg(file, cache_load=False)\n manual_world_graphs[game_id]={}\n manual_world_graphs[game_id]['graph'] = graph\n manual_world_graphs[game_id]['triplets'] = triplets\n manual_world_graphs[game_id]['entities'] = entities\n print(' DONE')\n return manual_world_graphs\n\n\n\n\ndef kg_match(extractor, target_entities, kg_entities):\n result = set()\n kg_entities = escape_entities(kg_entities)\n for e in target_entities:\n e = e.lower().strip()\n result |= extractor(e, kg_entities)\n return result\n\n\ndef save_graph_tsv(graph, path):\n relation_map = nx.get_edge_attributes(graph, 'relation')\n lines = []\n for n1, n2 in graph.edges:\n relations = relation_map[n1, n2].split()\n for r in relations:\n lines.append(f'{n1}\\t{r}\\t{n2}\\n')\n with open(path, 'w') as f:\n f.writelines(lines)\n\n\nif __name__ == '__main__':\n from utils_twc import extractor\n from utils_twc.nlp import Tokenizer\n\n tk_extractor = extractor.get_extractor('max')\n tokenizer = Tokenizer(extractor=tk_extractor)\n rel_extract = RelationExtractor(tokenizer,openie_url='http://iqa962.sl.cloud9.ibm.com:9000/')\n # text = 'On the table, you see an apple, a hat, a key and an umbrella. '\n text = \"You've just walked into a Living Room. You try to gain information on your \" \\\n \"surroundings by using a technique you call looking. You can see a closet. \" \\\n \"You idly wonder how they came up with the name TextWorld for this place. \" \\\n \"It's pretty fitting. A closed standard looking antique trunk is in the room. \" \\\n \"You can see a table. The table is usual. On the table you see an apple, a mug, \" \\\n \"a newspaper, a note, a hat and a pencil. You smell a sickening smell, and follow \" \\\n \"it to a couch. The couch is standard. But the thing is empty. Hm. Oh well You see a \" \\\n \"gleam over in a corner, where you can see a tv stand. The tv stand is ordinary. \" \\\n \"On the tv stand you can make out a tv. You don't like doors? Why not try going east, \" \\\n \"that entranceway is unguarded. You are carrying nothing.\"\n sents = text\n # clauses = clausie.clausie(text)\n # propositions = clausie.extract_propositions(clauses)\n # sents = ''\n # for prop in propositions:\n # sent = clausie.proposition_text_str(prop)\n # sents += sent\n # print(sent)\n graph, add_rules = rel_extract.fetch_triplets(sents, nx.DiGraph())\n print(add_rules)\n\n" ]
[ [ "matplotlib.pyplot.Normalize", "matplotlib.pyplot.show", "numpy.max", "matplotlib.pyplot.get_cmap", "numpy.min", "matplotlib.pyplot.colorbar" ] ]
714627034/Paddle-Lite
[ "015ba88a4d639db0b73603e37f83e47be041a4eb" ]
[ "lite/tests/unittest_py/op/common/test_unique_with_counts_op_base.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('..')\n\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport numpy as np\nfrom functools import partial\nfrom typing import Optional, List, Callable, Dict, Any, Set\nimport unittest\nimport hypothesis\nimport hypothesis.strategies as st\n\n\ndef sample_program_configs(draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=2, max_value=100), min_size=1, max_size=1))\n\n def generate_IndexTensor():\n return np.random.randint(1, 5, size=in_shape).astype(np.int32)\n\n unique_with_counts_op = OpConfig(\n type=\"unique_with_counts\",\n inputs={\"X\": [\"input_data\"]},\n outputs={\n \"Out\": [\"output_data\"],\n \"Index\": [\"Index_data\"],\n \"Count\": [\"Count_data\"]\n },\n attrs={\"dtype\": 2})\n program_config = ProgramConfig(\n ops=[unique_with_counts_op],\n weights={\n \"Index_data\": TensorConfig(data_gen=partial(generate_IndexTensor))\n },\n inputs={\"input_data\": TensorConfig(shape=in_shape), },\n outputs=[\"output_data\", \"Index_data\", \"Count_data\"])\n return program_config\n" ]
[ [ "numpy.random.randint" ] ]
lqkweb/learnMLflow
[ "13c5decaebba95b1b90f92021be35e343b4764af", "13c5decaebba95b1b90f92021be35e343b4764af" ]
[ "scikit-learn-master/sklearn/linear_model/ridge.py", "scikit-learn-master/sklearn/utils/tests/test_utils.py" ]
[ "\"\"\"\nRidge regression\n\"\"\"\n\n# Author: Mathieu Blondel <[email protected]>\n# Reuben Fletcher-Costin <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Michael Eickenberg <[email protected]>\n# License: BSD 3 clause\n\n\nfrom abc import ABCMeta, abstractmethod\nimport warnings\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy import sparse\nfrom scipy.sparse import linalg as sp_linalg\n\nfrom .base import LinearClassifierMixin, LinearModel, _rescale_data\nfrom .sag import sag_solver\nfrom ..base import RegressorMixin\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.extmath import row_norms\nfrom ..utils import check_X_y\nfrom ..utils import check_array\nfrom ..utils import check_consistent_length\nfrom ..utils import compute_sample_weight\nfrom ..utils import column_or_1d\nfrom ..preprocessing import LabelBinarizer\nfrom ..model_selection import GridSearchCV\nfrom ..metrics.scorer import check_scoring\nfrom ..exceptions import ConvergenceWarning\n\n\ndef _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):\n n_samples, n_features = X.shape\n X1 = sp_linalg.aslinearoperator(X)\n coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)\n\n if n_features > n_samples:\n def create_mv(curr_alpha):\n def _mv(x):\n return X1.matvec(X1.rmatvec(x)) + curr_alpha * x\n return _mv\n else:\n def create_mv(curr_alpha):\n def _mv(x):\n return X1.rmatvec(X1.matvec(x)) + curr_alpha * x\n return _mv\n\n for i in range(y.shape[1]):\n y_column = y[:, i]\n\n mv = create_mv(alpha[i])\n if n_features > n_samples:\n # kernel ridge\n # w = X.T * inv(X X^t + alpha*Id) y\n C = sp_linalg.LinearOperator(\n (n_samples, n_samples), matvec=mv, dtype=X.dtype)\n # FIXME atol\n try:\n coef, info = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')\n except TypeError:\n # old scipy\n coef, info = sp_linalg.cg(C, y_column, tol=tol)\n coefs[i] = X1.rmatvec(coef)\n else:\n # linear ridge\n # w = inv(X^t X + alpha*Id) * X.T y\n y_column = X1.rmatvec(y_column)\n C = sp_linalg.LinearOperator(\n (n_features, n_features), matvec=mv, dtype=X.dtype)\n # FIXME atol\n try:\n coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,\n tol=tol, atol='legacy')\n except TypeError:\n # old scipy\n coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,\n tol=tol)\n\n if info < 0:\n raise ValueError(\"Failed with error code %d\" % info)\n\n if max_iter is None and info > 0 and verbose:\n warnings.warn(\"sparse_cg did not converge after %d iterations.\" %\n info, ConvergenceWarning)\n\n return coefs\n\n\ndef _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):\n n_samples, n_features = X.shape\n coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)\n n_iter = np.empty(y.shape[1], dtype=np.int32)\n\n # According to the lsqr documentation, alpha = damp^2.\n sqrt_alpha = np.sqrt(alpha)\n\n for i in range(y.shape[1]):\n y_column = y[:, i]\n info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],\n atol=tol, btol=tol, iter_lim=max_iter)\n coefs[i] = info[0]\n n_iter[i] = info[2]\n\n return coefs, n_iter\n\n\ndef _solve_cholesky(X, y, alpha):\n # w = inv(X^t X + alpha*Id) * X.T y\n n_samples, n_features = X.shape\n n_targets = y.shape[1]\n\n A = safe_sparse_dot(X.T, X, dense_output=True)\n Xy = safe_sparse_dot(X.T, y, dense_output=True)\n\n one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])\n\n if one_alpha:\n A.flat[::n_features + 1] += alpha[0]\n return linalg.solve(A, Xy, sym_pos=True,\n overwrite_a=True).T\n else:\n coefs = np.empty([n_targets, n_features], dtype=X.dtype)\n 
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):\n A.flat[::n_features + 1] += current_alpha\n coef[:] = linalg.solve(A, target, sym_pos=True,\n overwrite_a=False).ravel()\n A.flat[::n_features + 1] -= current_alpha\n return coefs\n\n\ndef _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):\n # dual_coef = inv(X X^t + alpha*Id) y\n n_samples = K.shape[0]\n n_targets = y.shape[1]\n\n if copy:\n K = K.copy()\n\n alpha = np.atleast_1d(alpha)\n one_alpha = (alpha == alpha[0]).all()\n has_sw = isinstance(sample_weight, np.ndarray) \\\n or sample_weight not in [1.0, None]\n\n if has_sw:\n # Unlike other solvers, we need to support sample_weight directly\n # because K might be a pre-computed kernel.\n sw = np.sqrt(np.atleast_1d(sample_weight))\n y = y * sw[:, np.newaxis]\n K *= np.outer(sw, sw)\n\n if one_alpha:\n # Only one penalty, we can solve multi-target problems in one time.\n K.flat[::n_samples + 1] += alpha[0]\n\n try:\n # Note: we must use overwrite_a=False in order to be able to\n # use the fall-back solution below in case a LinAlgError\n # is raised\n dual_coef = linalg.solve(K, y, sym_pos=True,\n overwrite_a=False)\n except np.linalg.LinAlgError:\n warnings.warn(\"Singular matrix in solving dual problem. Using \"\n \"least-squares solution instead.\")\n dual_coef = linalg.lstsq(K, y)[0]\n\n # K is expensive to compute and store in memory so change it back in\n # case it was user-given.\n K.flat[::n_samples + 1] -= alpha[0]\n\n if has_sw:\n dual_coef *= sw[:, np.newaxis]\n\n return dual_coef\n else:\n # One penalty per target. We need to solve each target separately.\n dual_coefs = np.empty([n_targets, n_samples], K.dtype)\n\n for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):\n K.flat[::n_samples + 1] += current_alpha\n\n dual_coef[:] = linalg.solve(K, target, sym_pos=True,\n overwrite_a=False).ravel()\n\n K.flat[::n_samples + 1] -= current_alpha\n\n if has_sw:\n dual_coefs *= sw[np.newaxis, :]\n\n return dual_coefs.T\n\n\ndef _solve_svd(X, y, alpha):\n U, s, Vt = linalg.svd(X, full_matrices=False)\n idx = s > 1e-15 # same default value as scipy.linalg.pinv\n s_nnz = s[idx][:, np.newaxis]\n UTy = np.dot(U.T, y)\n d = np.zeros((s.size, alpha.size), dtype=X.dtype)\n d[idx] = s_nnz / (s_nnz ** 2 + alpha)\n d_UT_y = d * UTy\n return np.dot(Vt.T, d_UT_y).T\n\n\ndef ridge_regression(X, y, alpha, sample_weight=None, solver='auto',\n max_iter=None, tol=1e-3, verbose=0, random_state=None,\n return_n_iter=False, return_intercept=False):\n \"\"\"Solve the ridge equation by the method of normal equations.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, LinearOperator},\n shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values\n\n alpha : {float, array-like},\n shape = [n_targets] if array-like\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC. If an array is passed, penalties are\n assumed to be specific to the targets. Hence they must correspond in\n number.\n\n sample_weight : float or numpy array of shape [n_samples]\n Individual weights for each sample. If sample_weight is not None and\n solver='auto', the solver will be set to 'cholesky'.\n\n .. 
versionadded:: 0.17\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than\n 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution via a Cholesky decomposition of\n dot(X.T, X)\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its improved, unbiased version named SAGA. Both methods also use an\n iterative procedure, and are often faster than other solvers when\n both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n\n All last five solvers support both dense and sparse data. However, only\n 'sag' and 'saga' supports sparse input when`fit_intercept` is True.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n max_iter : int, optional\n Maximum number of iterations for conjugate gradient solver.\n For the 'sparse_cg' and 'lsqr' solvers, the default value is determined\n by scipy.sparse.linalg. For 'sag' and saga solver, the default value is\n 1000.\n\n tol : float\n Precision of the solution.\n\n verbose : int\n Verbosity level. Setting verbose > 0 will display additional\n information depending on the solver used.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator to use when shuffling\n the data. If int, random_state is the seed used by the random number\n generator; If RandomState instance, random_state is the random number\n generator; If None, the random number generator is the RandomState\n instance used by `np.random`. Used when ``solver`` == 'sag'.\n\n return_n_iter : boolean, default False\n If True, the method also returns `n_iter`, the actual number of\n iteration performed by the solver.\n\n .. versionadded:: 0.17\n\n return_intercept : boolean, default False\n If True and if X is sparse, the method also returns the intercept,\n and the solver is automatically changed to 'sag'. This is only a\n temporary fix for fitting the intercept with sparse data. For dense\n data, use sklearn.linear_model._preprocess_data before your regression.\n\n .. versionadded:: 0.17\n\n Returns\n -------\n coef : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n\n n_iter : int, optional\n The actual number of iteration performed by the solver.\n Only returned if `return_n_iter` is True.\n\n intercept : float or array, shape = [n_targets]\n The intercept of the model. 
Only returned if `return_intercept`\n is True and if X is a scipy sparse array.\n\n Notes\n -----\n This function won't compute the intercept.\n \"\"\"\n if return_intercept and sparse.issparse(X) and solver != 'sag':\n if solver != 'auto':\n warnings.warn(\"In Ridge, only 'sag' solver can currently fit the \"\n \"intercept when X is sparse. Solver has been \"\n \"automatically changed into 'sag'.\")\n solver = 'sag'\n\n _dtype = [np.float64, np.float32]\n\n # SAG needs X and y columns to be C-contiguous and np.float64\n if solver in ['sag', 'saga']:\n X = check_array(X, accept_sparse=['csr'],\n dtype=np.float64, order='C')\n y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')\n else:\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],\n dtype=_dtype)\n y = check_array(y, dtype=X.dtype, ensure_2d=False)\n check_consistent_length(X, y)\n\n n_samples, n_features = X.shape\n\n if y.ndim > 2:\n raise ValueError(\"Target y has the wrong shape %s\" % str(y.shape))\n\n ravel = False\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n ravel = True\n\n n_samples_, n_targets = y.shape\n\n if n_samples != n_samples_:\n raise ValueError(\"Number of samples in X and y does not correspond:\"\n \" %d != %d\" % (n_samples, n_samples_))\n\n has_sw = sample_weight is not None\n\n if solver == 'auto':\n # cholesky if it's a dense array and cg in any other case\n if not sparse.issparse(X) or has_sw:\n solver = 'cholesky'\n else:\n solver = 'sparse_cg'\n\n if has_sw:\n if np.atleast_1d(sample_weight).ndim > 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n if solver not in ['sag', 'saga']:\n # SAG supports sample_weight directly. For other solvers,\n # we implement sample_weight via a simple rescaling.\n X, y = _rescale_data(X, y, sample_weight)\n\n # There should be either 1 or n_targets penalties\n alpha = np.asarray(alpha, dtype=X.dtype).ravel()\n if alpha.size not in [1, n_targets]:\n raise ValueError(\"Number of targets and number of penalties \"\n \"do not correspond: %d != %d\"\n % (alpha.size, n_targets))\n\n if alpha.size == 1 and n_targets > 1:\n alpha = np.repeat(alpha, n_targets)\n\n if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):\n raise ValueError('Solver %s not understood' % solver)\n\n n_iter = None\n if solver == 'sparse_cg':\n coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)\n\n elif solver == 'lsqr':\n coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)\n\n elif solver == 'cholesky':\n if n_features > n_samples:\n K = safe_sparse_dot(X, X.T, dense_output=True)\n try:\n dual_coef = _solve_cholesky_kernel(K, y, alpha)\n\n coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T\n except linalg.LinAlgError:\n # use SVD solver if matrix is singular\n solver = 'svd'\n\n else:\n try:\n coef = _solve_cholesky(X, y, alpha)\n except linalg.LinAlgError:\n # use SVD solver if matrix is singular\n solver = 'svd'\n\n elif solver in ['sag', 'saga']:\n # precompute max_squared_sum for all targets\n max_squared_sum = row_norms(X, squared=True).max()\n\n coef = np.empty((y.shape[1], n_features))\n n_iter = np.empty(y.shape[1], dtype=np.int32)\n intercept = np.zeros((y.shape[1], ))\n for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):\n init = {'coef': np.zeros((n_features + int(return_intercept), 1))}\n coef_, n_iter_, _ = sag_solver(\n X, target.ravel(), sample_weight, 'squared', alpha_i, 0,\n max_iter, tol, verbose, random_state, False, max_squared_sum,\n init,\n is_saga=solver == 'saga')\n if return_intercept:\n coef[i] = 
coef_[:-1]\n intercept[i] = coef_[-1]\n else:\n coef[i] = coef_\n n_iter[i] = n_iter_\n\n if intercept.shape[0] == 1:\n intercept = intercept[0]\n coef = np.asarray(coef)\n\n if solver == 'svd':\n if sparse.issparse(X):\n raise TypeError('SVD solver does not support sparse'\n ' inputs currently')\n coef = _solve_svd(X, y, alpha)\n\n if ravel:\n # When y was passed as a 1d-array, we flatten the coefficients.\n coef = coef.ravel()\n\n if return_n_iter and return_intercept:\n return coef, n_iter, intercept\n elif return_intercept:\n return coef, intercept\n elif return_n_iter:\n return coef, n_iter\n else:\n return coef\n\n\nclass _BaseRidge(LinearModel, metaclass=ABCMeta):\n\n @abstractmethod\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, solver=\"auto\",\n random_state=None):\n self.alpha = alpha\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.max_iter = max_iter\n self.tol = tol\n self.solver = solver\n self.random_state = random_state\n\n def fit(self, X, y, sample_weight=None):\n\n if self.solver in ('sag', 'saga'):\n _dtype = np.float64\n else:\n # all other solvers work at both float precision levels\n _dtype = [np.float64, np.float32]\n\n X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=_dtype,\n multi_output=True, y_numeric=True)\n\n if ((sample_weight is not None) and\n np.atleast_1d(sample_weight).ndim > 1):\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X,\n sample_weight=sample_weight)\n\n # temporary fix for fitting the intercept with sparse data using 'sag'\n if sparse.issparse(X) and self.fit_intercept:\n self.coef_, self.n_iter_, self.intercept_ = ridge_regression(\n X, y, alpha=self.alpha, sample_weight=sample_weight,\n max_iter=self.max_iter, tol=self.tol, solver=self.solver,\n random_state=self.random_state, return_n_iter=True,\n return_intercept=True)\n self.intercept_ += y_offset\n else:\n self.coef_, self.n_iter_ = ridge_regression(\n X, y, alpha=self.alpha, sample_weight=sample_weight,\n max_iter=self.max_iter, tol=self.tol, solver=self.solver,\n random_state=self.random_state, return_n_iter=True,\n return_intercept=False)\n self._set_intercept(X_offset, y_offset, X_scale)\n\n return self\n\n\nclass Ridge(_BaseRidge, RegressorMixin):\n \"\"\"Linear least squares with l2 regularization.\n\n Minimizes the objective function::\n\n ||y - Xw||^2_2 + alpha * ||w||^2_2\n\n This model solves a regression model where the loss function is\n the linear least squares function and regularization is given by\n the l2-norm. Also known as Ridge Regression or Tikhonov regularization.\n This estimator has built-in support for multi-variate regression\n (i.e., when y is a 2d-array of shape [n_samples, n_targets]).\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alpha : {float, array-like}, shape (n_targets)\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC. If an array is passed, penalties are\n assumed to be specific to the targets. Hence they must correspond in\n number.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. 
If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n max_iter : int, optional\n Maximum number of iterations for conjugate gradient solver.\n For 'sparse_cg' and 'lsqr' solvers, the default value is determined\n by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.\n\n tol : float\n Precision of the solution.\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than\n 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its improved, unbiased version named SAGA. Both methods also use an\n iterative procedure, and are often faster than other solvers when\n both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n All last five solvers support both dense and sparse data. However,\n only 'sag' and 'saga' supports sparse input when `fit_intercept` is\n True.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator to use when shuffling\n the data. If int, random_state is the seed used by the random number\n generator; If RandomState instance, random_state is the random number\n generator; If None, the random number generator is the RandomState\n instance used by `np.random`. Used when ``solver`` == 'sag'.\n\n .. versionadded:: 0.17\n *random_state* to support Stochastic Average Gradient.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) or (n_targets, n_features)\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n n_iter_ : array or None, shape (n_targets,)\n Actual number of iterations for each target. Available only for\n sag and lsqr solvers. Other solvers will return None.\n\n .. 
versionadded:: 0.17\n\n See also\n --------\n RidgeClassifier : Ridge classifier\n RidgeCV : Ridge regression with built-in cross validation\n :class:`sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression\n combines ridge regression with the kernel trick\n\n Examples\n --------\n >>> from sklearn.linear_model import Ridge\n >>> import numpy as np\n >>> n_samples, n_features = 10, 5\n >>> np.random.seed(0)\n >>> y = np.random.randn(n_samples)\n >>> X = np.random.randn(n_samples, n_features)\n >>> clf = Ridge(alpha=1.0)\n >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE\n Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\n\n \"\"\"\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, solver=\"auto\",\n random_state=None):\n super().__init__(\n alpha=alpha, fit_intercept=fit_intercept,\n normalize=normalize, copy_X=copy_X,\n max_iter=max_iter, tol=tol, solver=solver,\n random_state=random_state)\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values\n\n sample_weight : float or numpy array of shape [n_samples]\n Individual weights for each sample\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n return super().fit(X, y, sample_weight=sample_weight)\n\n\nclass RidgeClassifier(LinearClassifierMixin, _BaseRidge):\n \"\"\"Classifier using Ridge regression.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alpha : float\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set to false, no\n intercept will be used in calculations (e.g. 
data is expected to be\n already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n max_iter : int, optional\n Maximum number of iterations for conjugate gradient solver.\n The default value is determined by scipy.sparse.linalg.\n\n tol : float\n Precision of the solution.\n\n class_weight : dict or 'balanced', optional\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than\n 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its unbiased and more flexible version named SAGA. Both methods\n use an iterative procedure, and are often faster than other solvers\n when both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n random_state : int, RandomState instance or None, optional, default None\n The seed of the pseudo random number generator to use when shuffling\n the data. If int, random_state is the seed used by the random number\n generator; If RandomState instance, random_state is the random number\n generator; If None, the random number generator is the RandomState\n instance used by `np.random`. Used when ``solver`` == 'sag'.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) or (n_classes, n_features)\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n n_iter_ : array or None, shape (n_targets,)\n Actual number of iterations for each target. Available only for\n sag and lsqr solvers. 
Other solvers will return None.\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.linear_model import RidgeClassifier\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> clf = RidgeClassifier().fit(X, y)\n >>> clf.score(X, y) # doctest: +ELLIPSIS\n 0.9595...\n\n See also\n --------\n Ridge : Ridge regression\n RidgeClassifierCV : Ridge classifier with built-in cross validation\n\n Notes\n -----\n For multi-class classification, n_class classifiers are trained in\n a one-versus-all approach. Concretely, this is implemented by taking\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, class_weight=None,\n solver=\"auto\", random_state=None):\n super().__init__(\n alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,\n copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,\n random_state=random_state)\n self.class_weight = class_weight\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples,n_features]\n Training data\n\n y : array-like, shape = [n_samples]\n Target values\n\n sample_weight : float or numpy array of shape (n_samples,)\n Sample weight.\n\n .. versionadded:: 0.17\n *sample_weight* support to Classifier.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],\n multi_output=True)\n\n self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)\n Y = self._label_binarizer.fit_transform(y)\n if not self._label_binarizer.y_type_.startswith('multilabel'):\n y = column_or_1d(y, warn=True)\n else:\n # we don't (yet) support multi-label classification in Ridge\n raise ValueError(\n \"%s doesn't support multi-label classification\" % (\n self.__class__.__name__))\n\n if self.class_weight:\n if sample_weight is None:\n sample_weight = 1.\n # modify the sample weights with the corresponding class weight\n sample_weight = (sample_weight *\n compute_sample_weight(self.class_weight, y))\n\n super().fit(X, Y, sample_weight=sample_weight)\n return self\n\n @property\n def classes_(self):\n return self._label_binarizer.classes_\n\n\nclass _RidgeGCV(LinearModel):\n \"\"\"Ridge regression with built-in Generalized Cross-Validation\n\n It allows efficient Leave-One-Out cross-validation.\n\n This class is not intended to be used directly. 
Use RidgeCV instead.\n\n Notes\n -----\n\n We want to solve (K + alpha*Id)c = y,\n where K = X X^T is the kernel matrix.\n\n Let G = (K + alpha*Id)^-1.\n\n Dual solution: c = Gy\n Primal solution: w = X^T c\n\n Compute eigendecomposition K = Q V Q^T.\n Then G = Q (V + alpha*Id)^-1 Q^T,\n where (V + alpha*Id) is diagonal.\n It is thus inexpensive to inverse for many alphas.\n\n Let loov be the vector of prediction values for each example\n when the model was fitted with all examples but this example.\n\n loov = (KGY - diag(KG)Y) / diag(I-KG)\n\n Let looe be the vector of prediction errors for each example\n when the model was fitted with all examples but this example.\n\n looe = y - loov = c / diag(G)\n\n References\n ----------\n http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf\n https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf\n \"\"\"\n\n def __init__(self, alphas=(0.1, 1.0, 10.0),\n fit_intercept=True, normalize=False,\n scoring=None, copy_X=True,\n gcv_mode=None, store_cv_values=False):\n self.alphas = np.asarray(alphas)\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.scoring = scoring\n self.copy_X = copy_X\n self.gcv_mode = gcv_mode\n self.store_cv_values = store_cv_values\n\n def _pre_compute(self, X, y, centered_kernel=True):\n # even if X is very sparse, K is usually very dense\n K = safe_sparse_dot(X, X.T, dense_output=True)\n # the following emulates an additional constant regressor\n # corresponding to fit_intercept=True\n # but this is done only when the features have been centered\n if centered_kernel:\n K += np.ones_like(K)\n v, Q = linalg.eigh(K)\n QT_y = np.dot(Q.T, y)\n return v, Q, QT_y\n\n def _decomp_diag(self, v_prime, Q):\n # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))\n return (v_prime * Q ** 2).sum(axis=-1)\n\n def _diag_dot(self, D, B):\n # compute dot(diag(D), B)\n if len(B.shape) > 1:\n # handle case where B is > 1-d\n D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]\n return D * B\n\n def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):\n \"\"\"Helper function to avoid code duplication between self._errors and\n self._values.\n\n Notes\n -----\n We don't construct matrix G, instead compute action on y & diagonal.\n \"\"\"\n w = 1. 
/ (v + alpha)\n constant_column = np.var(Q, 0) < 1.e-12\n # detect constant columns\n w[constant_column] = 0 # cancel the regularization for the intercept\n\n c = np.dot(Q, self._diag_dot(w, QT_y))\n G_diag = self._decomp_diag(w, Q)\n # handle case where y is 2-d\n if len(y.shape) != 1:\n G_diag = G_diag[:, np.newaxis]\n return G_diag, c\n\n def _errors(self, alpha, y, v, Q, QT_y):\n G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)\n return (c / G_diag) ** 2, c\n\n def _values(self, alpha, y, v, Q, QT_y):\n G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)\n return y - (c / G_diag), c\n\n def _pre_compute_svd(self, X, y, centered_kernel=True):\n if sparse.issparse(X):\n raise TypeError(\"SVD not supported for sparse matrices\")\n if centered_kernel:\n X = np.hstack((X, np.ones((X.shape[0], 1))))\n # to emulate fit_intercept=True situation, add a column on ones\n # Note that by centering, the other columns are orthogonal to that one\n U, s, _ = linalg.svd(X, full_matrices=0)\n v = s ** 2\n UT_y = np.dot(U.T, y)\n return v, U, UT_y\n\n def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):\n \"\"\"Helper function to avoid code duplication between self._errors_svd\n and self._values_svd.\n \"\"\"\n constant_column = np.var(U, 0) < 1.e-12\n # detect columns colinear to ones\n w = ((v + alpha) ** -1) - (alpha ** -1)\n w[constant_column] = - (alpha ** -1)\n # cancel the regularization for the intercept\n c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y\n G_diag = self._decomp_diag(w, U) + (alpha ** -1)\n if len(y.shape) != 1:\n # handle case where y is 2-d\n G_diag = G_diag[:, np.newaxis]\n return G_diag, c\n\n def _errors_svd(self, alpha, y, v, U, UT_y):\n G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)\n return (c / G_diag) ** 2, c\n\n def _values_svd(self, alpha, y, v, U, UT_y):\n G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)\n return y - (c / G_diag), c\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values. 
Will be cast to X's dtype if necessary\n\n sample_weight : float or array-like of shape [n_samples]\n Sample weight\n\n Returns\n -------\n self : object\n \"\"\"\n X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,\n multi_output=True, y_numeric=True)\n if sample_weight is not None and not isinstance(sample_weight, float):\n sample_weight = check_array(sample_weight, ensure_2d=False)\n n_samples, n_features = X.shape\n\n X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X,\n sample_weight=sample_weight)\n\n gcv_mode = self.gcv_mode\n with_sw = len(np.shape(sample_weight))\n\n if gcv_mode is None or gcv_mode == 'auto':\n if sparse.issparse(X) or n_features > n_samples or with_sw:\n gcv_mode = 'eigen'\n else:\n gcv_mode = 'svd'\n elif gcv_mode == \"svd\" and with_sw:\n # FIXME non-uniform sample weights not yet supported\n warnings.warn(\"non-uniform sample weights unsupported for svd, \"\n \"forcing usage of eigen\")\n gcv_mode = 'eigen'\n\n if gcv_mode == 'eigen':\n _pre_compute = self._pre_compute\n _errors = self._errors\n _values = self._values\n elif gcv_mode == 'svd':\n # assert n_samples >= n_features\n _pre_compute = self._pre_compute_svd\n _errors = self._errors_svd\n _values = self._values_svd\n else:\n raise ValueError('bad gcv_mode \"%s\"' % gcv_mode)\n\n if sample_weight is not None:\n X, y = _rescale_data(X, y, sample_weight)\n\n centered_kernel = not sparse.issparse(X) and self.fit_intercept\n\n v, Q, QT_y = _pre_compute(X, y, centered_kernel)\n n_y = 1 if len(y.shape) == 1 else y.shape[1]\n cv_values = np.zeros((n_samples * n_y, len(self.alphas)))\n C = []\n\n scorer = check_scoring(self, scoring=self.scoring, allow_none=True)\n error = scorer is None\n\n if np.any(self.alphas < 0):\n raise ValueError(\"alphas cannot be negative. \"\n \"Got {} containing some \"\n \"negative value instead.\".format(self.alphas))\n\n for i, alpha in enumerate(self.alphas):\n if error:\n out, c = _errors(float(alpha), y, v, Q, QT_y)\n else:\n out, c = _values(float(alpha), y, v, Q, QT_y)\n cv_values[:, i] = out.ravel()\n C.append(c)\n\n if error:\n best = cv_values.mean(axis=0).argmin()\n else:\n # The scorer want an object that will make the predictions but\n # they are already computed efficiently by _RidgeGCV. 
This\n # identity_estimator will just return them\n def identity_estimator():\n pass\n identity_estimator.decision_function = lambda y_predict: y_predict\n identity_estimator.predict = lambda y_predict: y_predict\n\n out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])\n for i in range(len(self.alphas))]\n best = np.argmax(out)\n\n self.alpha_ = self.alphas[best]\n self.dual_coef_ = C[best]\n self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)\n\n self._set_intercept(X_offset, y_offset, X_scale)\n\n if self.store_cv_values:\n if len(y.shape) == 1:\n cv_values_shape = n_samples, len(self.alphas)\n else:\n cv_values_shape = n_samples, n_y, len(self.alphas)\n self.cv_values_ = cv_values.reshape(cv_values_shape)\n\n return self\n\n\nclass _BaseRidgeCV(LinearModel):\n def __init__(self, alphas=(0.1, 1.0, 10.0),\n fit_intercept=True, normalize=False, scoring=None,\n cv=None, gcv_mode=None,\n store_cv_values=False):\n self.alphas = np.asarray(alphas)\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.scoring = scoring\n self.cv = cv\n self.gcv_mode = gcv_mode\n self.store_cv_values = store_cv_values\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values. Will be cast to X's dtype if necessary\n\n sample_weight : float or array-like of shape [n_samples]\n Sample weight\n\n Returns\n -------\n self : object\n \"\"\"\n if self.cv is None:\n estimator = _RidgeGCV(self.alphas,\n fit_intercept=self.fit_intercept,\n normalize=self.normalize,\n scoring=self.scoring,\n gcv_mode=self.gcv_mode,\n store_cv_values=self.store_cv_values)\n estimator.fit(X, y, sample_weight=sample_weight)\n self.alpha_ = estimator.alpha_\n if self.store_cv_values:\n self.cv_values_ = estimator.cv_values_\n else:\n if self.store_cv_values:\n raise ValueError(\"cv!=None and store_cv_values=True \"\n \" are incompatible\")\n parameters = {'alpha': self.alphas}\n gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept,\n normalize=self.normalize),\n parameters, cv=self.cv, scoring=self.scoring)\n gs.fit(X, y, sample_weight=sample_weight)\n estimator = gs.best_estimator_\n self.alpha_ = gs.best_estimator_.alpha\n\n self.coef_ = estimator.coef_\n self.intercept_ = estimator.intercept_\n\n return self\n\n\nclass RidgeCV(_BaseRidgeCV, RegressorMixin):\n \"\"\"Ridge regression with built-in cross-validation.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n By default, it performs Generalized Cross-Validation, which is a form of\n efficient Leave-One-Out cross-validation.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alphas : numpy array of shape [n_alphas]\n Array of alpha values to try.\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. 
data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the efficient Leave-One-Out cross-validation\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`sklearn.model_selection.KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n gcv_mode : {None, 'auto', 'svd', eigen'}, optional\n Flag indicating which strategy to use when performing\n Generalized Cross-Validation. Options are::\n\n 'auto' : use svd if n_samples > n_features or when X is a sparse\n matrix, otherwise use eigen\n 'svd' : force computation via singular value decomposition of X\n (does not work for sparse matrices)\n 'eigen' : force computation via eigendecomposition of X^T X\n\n The 'auto' mode is the default and is intended to pick the cheaper\n option of the two depending upon the shape and format of the training\n data.\n\n store_cv_values : boolean, default=False\n Flag indicating if the cross-validation values corresponding to\n each alpha should be stored in the ``cv_values_`` attribute (see\n below). This flag is only compatible with ``cv=None`` (i.e. using\n Generalized Cross-Validation).\n\n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_alphas] or \\\n shape = [n_samples, n_targets, n_alphas], optional\n Cross-validation values for each alpha (if ``store_cv_values=True``\\\n and ``cv=None``). After ``fit()`` has been called, this attribute \\\n will contain the mean squared errors (by default) or the values \\\n of the ``{loss,score}_func`` function (if provided in the constructor).\n\n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. 
Set to 0.0 if\n ``fit_intercept = False``.\n\n alpha_ : float\n Estimated regularization parameter.\n\n Examples\n --------\n >>> from sklearn.datasets import load_diabetes\n >>> from sklearn.linear_model import RidgeCV\n >>> X, y = load_diabetes(return_X_y=True)\n >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)\n >>> clf.score(X, y) # doctest: +ELLIPSIS\n 0.5166...\n\n See also\n --------\n Ridge : Ridge regression\n RidgeClassifier : Ridge classifier\n RidgeClassifierCV : Ridge classifier with built-in cross validation\n \"\"\"\n pass\n\n\nclass RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n \"\"\"Ridge classifier with built-in cross-validation.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n By default, it performs Generalized Cross-Validation, which is a form of\n efficient Leave-One-Out cross-validation. Currently, only the n_features >\n n_samples case is handled efficiently.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alphas : numpy array of shape [n_alphas]\n Array of alpha values to try.\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as\n LogisticRegression or LinearSVC.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the efficient Leave-One-Out cross-validation\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n class_weight : dict or 'balanced', optional\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n store_cv_values : boolean, default=False\n Flag indicating if the cross-validation values corresponding to\n each alpha should be stored in the ``cv_values_`` attribute (see\n below). This flag is only compatible with ``cv=None`` (i.e. using\n Generalized Cross-Validation).\n\n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_targets, n_alphas], optional\n Cross-validation values for each alpha (if ``store_cv_values=True`` and\n ``cv=None``). 
After ``fit()`` has been called, this attribute will\n contain the mean squared errors (by default) or the values of the\n ``{loss,score}_func`` function (if provided in the constructor).\n\n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n alpha_ : float\n Estimated regularization parameter\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.linear_model import RidgeClassifierCV\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)\n >>> clf.score(X, y) # doctest: +ELLIPSIS\n 0.9630...\n\n See also\n --------\n Ridge : Ridge regression\n RidgeClassifier : Ridge classifier\n RidgeCV : Ridge regression with built-in cross validation\n\n Notes\n -----\n For multi-class classification, n_class classifiers are trained in\n a one-versus-all approach. Concretely, this is implemented by taking\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n normalize=False, scoring=None, cv=None, class_weight=None,\n store_cv_values=False):\n super().__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the ridge classifier.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : array-like, shape (n_samples,)\n Target values. 
Will be cast to X's dtype if necessary\n\n sample_weight : float or numpy array of shape (n_samples,)\n Sample weight.\n\n Returns\n -------\n self : object\n \"\"\"\n check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],\n multi_output=True)\n\n self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)\n Y = self._label_binarizer.fit_transform(y)\n if not self._label_binarizer.y_type_.startswith('multilabel'):\n y = column_or_1d(y, warn=True)\n\n if self.class_weight:\n if sample_weight is None:\n sample_weight = 1.\n # modify the sample weights with the corresponding class weight\n sample_weight = (sample_weight *\n compute_sample_weight(self.class_weight, y))\n\n _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)\n return self\n\n @property\n def classes_(self):\n return self._label_binarizer.classes_\n", "from itertools import chain, product\nimport warnings\n\nimport pytest\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.utils.testing import (assert_equal, assert_raises,\n assert_array_equal,\n SkipTest, assert_raises_regex,\n assert_warns_message, assert_no_warnings)\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils import deprecated\nfrom sklearn.utils import resample\nfrom sklearn.utils import safe_mask\nfrom sklearn.utils import column_or_1d\nfrom sklearn.utils import safe_indexing\nfrom sklearn.utils import shuffle\nfrom sklearn.utils import gen_even_slices\nfrom sklearn.utils import get_chunk_n_rows\nfrom sklearn.utils import is_scalar_nan\nfrom sklearn.utils.mocking import MockDataFrame\nfrom sklearn import config_context\n\n\ndef test_make_rng():\n # Check the check_random_state utility function behavior\n assert check_random_state(None) is np.random.mtrand._rand\n assert check_random_state(np.random) is np.random.mtrand._rand\n\n rng_42 = np.random.RandomState(42)\n assert check_random_state(42).randint(100) == rng_42.randint(100)\n\n rng_42 = np.random.RandomState(42)\n assert check_random_state(rng_42) is rng_42\n\n rng_42 = np.random.RandomState(42)\n assert check_random_state(43).randint(100) != rng_42.randint(100)\n\n assert_raises(ValueError, check_random_state, \"some invalid seed\")\n\n\ndef test_deprecated():\n # Test whether the deprecated decorator issues appropriate warnings\n # Copied almost verbatim from https://docs.python.org/library/warnings.html\n\n # First a function...\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n\n @deprecated()\n def ham():\n return \"spam\"\n\n spam = ham()\n\n assert_equal(spam, \"spam\") # function must remain usable\n\n assert_equal(len(w), 1)\n assert issubclass(w[0].category, DeprecationWarning)\n assert \"deprecated\" in str(w[0].message).lower()\n\n # ... 
then a class.\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n\n @deprecated(\"don't use this\")\n class Ham(object):\n SPAM = 1\n\n ham = Ham()\n\n assert hasattr(ham, \"SPAM\")\n\n assert_equal(len(w), 1)\n assert issubclass(w[0].category, DeprecationWarning)\n assert \"deprecated\" in str(w[0].message).lower()\n\n\ndef test_resample():\n # Border case not worth mentioning in doctests\n assert resample() is None\n\n # Check that invalid arguments yield ValueError\n assert_raises(ValueError, resample, [0], [0, 1])\n assert_raises(ValueError, resample, [0, 1], [0, 1],\n replace=False, n_samples=3)\n assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)\n # Issue:6581, n_samples can be more when replace is True (default).\n assert_equal(len(resample([1, 2], n_samples=5)), 5)\n\n\ndef test_safe_mask():\n random_state = check_random_state(0)\n X = random_state.rand(5, 4)\n X_csr = sp.csr_matrix(X)\n mask = [False, False, True, True, True]\n\n mask = safe_mask(X, mask)\n assert_equal(X[mask].shape[0], 3)\n\n mask = safe_mask(X_csr, mask)\n assert_equal(X_csr[mask].shape[0], 3)\n\n\ndef test_column_or_1d():\n EXAMPLES = [\n (\"binary\", [\"spam\", \"egg\", \"spam\"]),\n (\"binary\", [0, 1, 0, 1]),\n (\"continuous\", np.arange(10) / 20.),\n (\"multiclass\", [1, 2, 3]),\n (\"multiclass\", [0, 1, 2, 2, 0]),\n (\"multiclass\", [[1], [2], [3]]),\n (\"multilabel-indicator\", [[0, 1, 0], [0, 0, 1]]),\n (\"multiclass-multioutput\", [[1, 2, 3]]),\n (\"multiclass-multioutput\", [[1, 1], [2, 2], [3, 1]]),\n (\"multiclass-multioutput\", [[5, 1], [4, 2], [3, 1]]),\n (\"multiclass-multioutput\", [[1, 2, 3]]),\n (\"continuous-multioutput\", np.arange(30).reshape((-1, 3))),\n ]\n\n for y_type, y in EXAMPLES:\n if y_type in [\"binary\", 'multiclass', \"continuous\"]:\n assert_array_equal(column_or_1d(y), np.ravel(y))\n else:\n assert_raises(ValueError, column_or_1d, y)\n\n\ndef test_safe_indexing():\n X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n inds = np.array([1, 2])\n X_inds = safe_indexing(X, inds)\n X_arrays = safe_indexing(np.array(X), inds)\n assert_array_equal(np.array(X_inds), X_arrays)\n assert_array_equal(np.array(X_inds), np.array(X)[inds])\n\n\ndef test_safe_indexing_pandas():\n try:\n import pandas as pd\n except ImportError:\n raise SkipTest(\"Pandas not found\")\n X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n X_df = pd.DataFrame(X)\n inds = np.array([1, 2])\n X_df_indexed = safe_indexing(X_df, inds)\n X_indexed = safe_indexing(X_df, inds)\n assert_array_equal(np.array(X_df_indexed), X_indexed)\n # fun with read-only data in dataframes\n # this happens in joblib memmapping\n X.setflags(write=False)\n X_df_readonly = pd.DataFrame(X)\n inds_readonly = inds.copy()\n inds_readonly.setflags(write=False)\n\n for this_df, this_inds in product([X_df, X_df_readonly],\n [inds, inds_readonly]):\n with warnings.catch_warnings(record=True):\n X_df_indexed = safe_indexing(this_df, this_inds)\n\n assert_array_equal(np.array(X_df_indexed), X_indexed)\n\n\ndef test_safe_indexing_mock_pandas():\n X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n X_df = MockDataFrame(X)\n inds = np.array([1, 2])\n X_df_indexed = safe_indexing(X_df, inds)\n X_indexed = safe_indexing(X_df, inds)\n assert_array_equal(np.array(X_df_indexed), X_indexed)\n\n\ndef test_shuffle_on_ndim_equals_three():\n def to_tuple(A): # to make the inner arrays hashable\n return tuple(tuple(tuple(C) for C in B) for B in A)\n\n A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)\n 
S = set(to_tuple(A))\n shuffle(A) # shouldn't raise a ValueError for dim = 3\n assert_equal(set(to_tuple(A)), S)\n\n\ndef test_shuffle_dont_convert_to_array():\n # Check that shuffle does not try to convert to numpy arrays with float\n # dtypes can let any indexable datastructure pass-through.\n a = ['a', 'b', 'c']\n b = np.array(['a', 'b', 'c'], dtype=object)\n c = [1, 2, 3]\n d = MockDataFrame(np.array([['a', 0],\n ['b', 1],\n ['c', 2]],\n dtype=object))\n e = sp.csc_matrix(np.arange(6).reshape(3, 2))\n a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)\n\n assert_equal(a_s, ['c', 'b', 'a'])\n assert_equal(type(a_s), list)\n\n assert_array_equal(b_s, ['c', 'b', 'a'])\n assert_equal(b_s.dtype, object)\n\n assert_equal(c_s, [3, 2, 1])\n assert_equal(type(c_s), list)\n\n assert_array_equal(d_s, np.array([['c', 2],\n ['b', 1],\n ['a', 0]],\n dtype=object))\n assert_equal(type(d_s), MockDataFrame)\n\n assert_array_equal(e_s.toarray(), np.array([[4, 5],\n [2, 3],\n [0, 1]]))\n\n\ndef test_gen_even_slices():\n # check that gen_even_slices contains all samples\n some_range = range(10)\n joined_range = list(chain(*[some_range[slice] for slice in\n gen_even_slices(10, 3)]))\n assert_array_equal(some_range, joined_range)\n\n # check that passing negative n_chunks raises an error\n slices = gen_even_slices(10, -1)\n assert_raises_regex(ValueError, \"gen_even_slices got n_packs=-1, must be\"\n \" >=1\", next, slices)\n\n\[email protected](\n ('row_bytes', 'max_n_rows', 'working_memory', 'expected', 'warning'),\n [(1024, None, 1, 1024, None),\n (1024, None, 0.99999999, 1023, None),\n (1023, None, 1, 1025, None),\n (1025, None, 1, 1023, None),\n (1024, None, 2, 2048, None),\n (1024, 7, 1, 7, None),\n (1024 * 1024, None, 1, 1, None),\n (1024 * 1024 + 1, None, 1, 1,\n 'Could not adhere to working_memory config. 
'\n 'Currently 1MiB, 2MiB required.'),\n ])\ndef test_get_chunk_n_rows(row_bytes, max_n_rows, working_memory,\n expected, warning):\n if warning is not None:\n def check_warning(*args, **kw):\n return assert_warns_message(UserWarning, warning, *args, **kw)\n else:\n check_warning = assert_no_warnings\n\n actual = check_warning(get_chunk_n_rows,\n row_bytes=row_bytes,\n max_n_rows=max_n_rows,\n working_memory=working_memory)\n\n assert actual == expected\n assert type(actual) is type(expected)\n with config_context(working_memory=working_memory):\n actual = check_warning(get_chunk_n_rows,\n row_bytes=row_bytes,\n max_n_rows=max_n_rows)\n assert actual == expected\n assert type(actual) is type(expected)\n\n\[email protected](\"value, result\", [(float(\"nan\"), True),\n (np.nan, True),\n (np.float(\"nan\"), True),\n (np.float32(\"nan\"), True),\n (np.float64(\"nan\"), True),\n (0, False),\n (0., False),\n (None, False),\n (\"\", False),\n (\"nan\", False),\n ([np.nan], False)])\ndef test_is_scalar_nan(value, result):\n assert is_scalar_nan(value) is result\n\n\ndef dummy_func():\n pass\n\n\ndef test_deprecation_joblib_api(tmpdir):\n def check_warning(*args, **kw):\n return assert_warns_message(\n DeprecationWarning, \"deprecated in version 0.20.1\", *args, **kw)\n\n # Ensure that the joblib API is deprecated in sklearn.util\n from sklearn.utils import Parallel, Memory, delayed\n from sklearn.utils import cpu_count, hash, effective_n_jobs\n check_warning(Memory, str(tmpdir))\n check_warning(hash, 1)\n check_warning(Parallel)\n check_warning(cpu_count)\n check_warning(effective_n_jobs, 1)\n check_warning(delayed, dummy_func)\n\n # Only parallel_backend and register_parallel_backend are not deprecated in\n # sklearn.utils\n from sklearn.utils import parallel_backend, register_parallel_backend\n assert_no_warnings(parallel_backend, 'loky', None)\n assert_no_warnings(register_parallel_backend, 'failing', None)\n\n # Ensure that the deprecation have no side effect in sklearn.utils._joblib\n from sklearn.utils._joblib import Parallel, Memory, delayed\n from sklearn.utils._joblib import cpu_count, hash, effective_n_jobs\n from sklearn.utils._joblib import parallel_backend\n from sklearn.utils._joblib import register_parallel_backend\n assert_no_warnings(Memory, str(tmpdir))\n assert_no_warnings(hash, 1)\n assert_no_warnings(Parallel)\n assert_no_warnings(cpu_count)\n assert_no_warnings(effective_n_jobs, 1)\n assert_no_warnings(delayed, dummy_func)\n assert_no_warnings(parallel_backend, 'loky', None)\n assert_no_warnings(register_parallel_backend, 'failing', None)\n\n from sklearn.utils._joblib import joblib\n del joblib.parallel.BACKENDS['failing']\n" ]
[ [ "numpy.ones", "numpy.var", "scipy.linalg.lstsq", "numpy.any", "numpy.ones_like", "numpy.asarray", "scipy.sparse.linalg.LinearOperator", "scipy.sparse.linalg.aslinearoperator", "scipy.linalg.eigh", "numpy.zeros", "scipy.linalg.solve", "numpy.repeat", "numpy.argmax", "scipy.sparse.linalg.lsqr", "scipy.sparse.linalg.cg", "numpy.empty", "scipy.sparse.issparse", "numpy.atleast_1d", "numpy.shape", "numpy.sqrt", "numpy.dot", "numpy.outer", "scipy.linalg.svd" ], [ "sklearn.utils.testing.SkipTest", "sklearn.utils.deprecated", "numpy.random.RandomState", "sklearn.utils.testing.assert_equal", "sklearn.utils.testing.assert_raises_regex", "sklearn.config_context", "numpy.float", "sklearn.utils.testing.assert_no_warnings", "numpy.float64", "sklearn.utils.safe_mask", "sklearn.utils.gen_even_slices", "sklearn.utils.testing.assert_array_equal", "sklearn.utils.resample", "sklearn.utils.check_random_state", "numpy.float32", "sklearn.utils.shuffle", "sklearn.utils.testing.assert_warns_message", "numpy.arange", "sklearn.utils.is_scalar_nan", "sklearn.utils.column_or_1d", "sklearn.utils.mocking.MockDataFrame", "pandas.DataFrame", "scipy.sparse.csr_matrix", "sklearn.utils.testing.assert_raises", "numpy.ravel", "sklearn.utils.safe_indexing", "numpy.array" ] ]
Easonyesheng/StereoCameraToolk
[ "9319b7f4e5ce36833de722a15e1074e82b8b4f84" ]
[ "models/ModelUtil/util.py" ]
[ "\"\"\"Utility \"\"\"\n\nimport numpy as np\nimport cv2\nimport os\nimport logging\n\ndef check_string_is_empty(string):\n \"\"\"name\n check string empty or not\n Args: \n\n Returns:\n\n \"\"\"\n if string == '':\n return True\n\n return False\n\ndef check_numpy_array(array):\n \"\"\"name\n check array empty or not\n Args: \n\n Returns:\n True - Exist\n \"\"\"\n try:\n array.all()\n except AttributeError:\n return False\n \n return True\n\ndef after_cv_imshow():\n \"\"\"name\n\n close all the show window if press 'esc'\n set after cv2.imshow()\n\n Args:\n\n Returns:\n\n \"\"\"\n k = cv2.waitKey(0)\n if k == 27:\n cv2.destroyAllWindows()\n\ndef save_img_with_prefix(img, path, name):\n \"\"\"name\n\n save as 'path/name.jpg'\n\n Args:\n\n Returns:\n\n \"\"\"\n cv2.imwrite(os.path.join(path,name+'.jpg'), img)\n\ndef img_show(img, name):\n \"\"\"\n \"\"\"\n cv2.startWindowThread()\n img = img / np.max(img)\n cv2.imshow(name, img)\n after_cv_imshow()\n\ndef test_dir_if_not_create(path):\n \"\"\"name\n\n save as 'path/name.jpg'\n\n Args:\n\n Returns:\n \"\"\"\n if os.path.isdir(path):\n return True\n else:\n print('Create New Folder:', path)\n os.makedirs(path)\n return True\n\ndef log_init(logfilename):\n \"\"\"name\n\n save as 'path/name.jpg'\n\n Args:\n\n Returns:\n \"\"\"\n # logging.basicConfig(filename=logfilename, level=logging.INFO)\n # logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n # filename=logfilename,\n # level=logging.DEBUG)\n\n logger = logging.getLogger() # 不加名称设置root logger\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s: - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n # 使用FileHandler输出到文件\n fh = logging.FileHandler(logfilename, 'w')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n\n # 使用StreamHandler输出到屏幕\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n\n # 添加两个Handler\n logger.addHandler(ch)\n logger.addHandler(fh)" ]
[ [ "numpy.max" ] ]
DewMaple/opencv-learning
[ "51991a5b9badf24cda740c1377f6be30dea91e1d" ]
[ "lesson_4_transformation/lesson_4_affine_transformation.py" ]
[ "import cv2\nimport numpy as np\n\nfrom utils import find_image\n\nimage_path = find_image('girls_01.jpg')\nimg = cv2.imread(image_path)\nrows, cols, channel = img.shape\n\npts_src = np.float32([[50, 50], [200, 50], [50, 200]])\npts_dst = np.float32([[10, 100], [200, 80], [100, 650]])\n\nM = cv2.getAffineTransform(pts_src, pts_dst)\nres = cv2.warpAffine(img, M, (cols, rows))\ncv2.imshow('transformation by three points', res)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.float32" ] ]
wichtounet/frameworks
[ "e0cac9d4ffbbf0b1e9d2491eb70bf2c6154f313b" ]
[ "tf/experiment6.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n\nimport argparse\nimport gzip\nimport os\nimport sys\nimport time\n\nimport os\nimport math\nimport numpy\nfrom PIL import Image\n\nimport numpy\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nbatch_size = 128\nbatches = 10009\nnum_epochs = 5\nnum_classes = 1000\n\nFLAGS = None\n\nfrom urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nimport tarfile\nimport pickle\n\ndef data_type():\n return tf.float32\n\ndef get_batch():\n index = 0\n\n global current_index\n global training_images\n global training_labels\n\n B = numpy.zeros(shape=(batch_size, 256, 256, 3))\n L = numpy.zeros(shape=(batch_size))\n\n while index < batch_size:\n try:\n img = load_img(training_images[current_index])\n B[index] = img_to_array(img)\n B[index] /= 255\n\n L[index] = training_labels[current_index]\n\n index = index + 1\n current_index = current_index + 1\n except:\n print(\"Ignore image {}\".format(training_images[current_index]))\n current_index = current_index + 1\n\n return B, keras.utils.to_categorical(L, num_classes)\n\ndef main(_):\n global current_index\n global training_images\n global training_labels\n\n label_counter = 0\n\n training_images = []\n training_labels = []\n\n for subdir, dirs, files in os.walk('/data/datasets/imagenet_resized/train/'):\n for folder in dirs:\n for folder_subdir, folder_dirs, folder_files in os.walk(os.path.join(subdir, folder)):\n for file in folder_files:\n training_images.append(os.path.join(folder_subdir, file))\n training_labels.append(label_counter)\n\n label_counter = label_counter + 1\n\n nice_n = math.floor(len(training_images) / batch_size) * batch_size\n\n print(nice_n)\n print(len(training_images))\n print(len(training_labels))\n\n import random\n\n perm = list(range(len(training_images)))\n random.shuffle(perm)\n training_images = [training_images[index] for index in perm]\n training_labels = [training_labels[index] for index in perm]\n\n print(\"Data is ready...\")\n\n train_data_node = tf.placeholder(data_type(), shape=(batch_size, 256, 256, 3))\n train_labels_node = tf.placeholder(tf.int64, shape=(batch_size,1000))\n\n # Convolutional weights\n conv1_weights = tf.Variable(tf.truncated_normal([3, 3, 3, 16], stddev=0.1, dtype=data_type()))\n conv1_biases = tf.Variable(tf.zeros([16], dtype=data_type()))\n conv2_weights = tf.Variable(tf.truncated_normal([3, 3, 16, 16], stddev=0.1, dtype=data_type()))\n conv2_biases = tf.Variable(tf.zeros([16], dtype=data_type()))\n conv3_weights = tf.Variable(tf.truncated_normal([3, 3, 16, 32], stddev=0.1, dtype=data_type()))\n conv3_biases = tf.Variable(tf.zeros([32], dtype=data_type()))\n conv4_weights = tf.Variable(tf.truncated_normal([3, 3, 32, 32], stddev=0.1, dtype=data_type()))\n conv4_biases = tf.Variable(tf.zeros([32], dtype=data_type()))\n conv5_weights = tf.Variable(tf.truncated_normal([3, 3, 32, 32], stddev=0.1, dtype=data_type()))\n conv5_biases = tf.Variable(tf.zeros([32], dtype=data_type()))\n\n # Fully connected weights\n fc1_weights = tf.Variable(tf.truncated_normal([2048, 2048], stddev=0.1, dtype=data_type()))\n fc1_biases = tf.Variable(tf.constant(0.1, shape=[2048], dtype=data_type()))\n fc2_weights = tf.Variable(tf.truncated_normal([2048, 1000], stddev=0.1, dtype=data_type()))\n fc2_biases = 
tf.Variable(tf.constant(0.1, shape=[1000], dtype=data_type()))\n\n def model(data):\n # Conv 1\n conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Conv 2\n conv = tf.nn.conv2d(pool, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Conv 3\n conv = tf.nn.conv2d(pool, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv3_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Conv 4\n conv = tf.nn.conv2d(pool, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv4_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Conv 5\n conv = tf.nn.conv2d(pool, conv5_weights, strides=[1, 1, 1, 1], padding='SAME')\n relu = tf.nn.relu(tf.nn.bias_add(conv, conv5_biases))\n pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Fully Connected\n reshape = tf.reshape(pool, [batch_size, 2048])\n hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)\n\n return tf.matmul(hidden, fc2_weights) + fc2_biases\n\n # Training computation: logits + cross-entropy loss.\n logits = model(train_data_node)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = train_labels_node))\n\n # Use simple momentum for the optimization.\n optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9).minimize(loss)\n\n acc_pred = tf.equal(tf.argmax(logits,1), tf.argmax(train_labels_node,1))\n accuracy = tf.reduce_mean(tf.cast(acc_pred, tf.float32))\n\n # Predictions for the current training minibatch.\n # train_prediction = tf.nn.softmax(logits)\n\n # Create a local session to run the training.\n with tf.Session() as sess:\n # Run all the initializers to prepare the trainable parameters.\n tf.global_variables_initializer().run(session = sess)\n print('Initialized!')\n\n for epoch in range(0, num_epochs):\n current_index = 0\n\n while current_index + batch_size < len(training_images):\n start_time = time.time()\n\n b, l = get_batch()\n\n feed_dict = {train_data_node: b, train_labels_node: l}\n\n # Run the optimizer to update weights.\n _, batch_loss, batch_accuracy = sess.run([optimizer, loss, accuracy], feed_dict=feed_dict)\n\n end_time = time.time()\n\n print('batch {}/{} loss: {} accuracy: {} duration: {}ms'.format(int(current_index / batch_size), int(nice_n / batch_size), batch_loss, batch_accuracy, 1000 * (end_time - start_time)), flush = True)\n\n print('epoch {}/{}'.format(epoch, num_epochs))\n\n # Finally print the result!\n\n current_index = 0\n acc = 0.0\n\n while current_index + batch_size < len(training_images):\n b, l = get_batch()\n\n feed_dict = {train_data_node: b, train_labels_node: l}\n [batch_accuracy] = sess.run([accuracy], feed_dict=feed_dict)\n print('Test batch accuracy:', batch_accuracy, flush = True)\n\n acc += batch_accuracy\n\n acc /= batches\n\n print('Test accuracy: %.1f%%' % acc)\n\ntf.app.run(main=main, argv=[sys.argv[0]])\n" ]
[ [ "tensorflow.placeholder", "tensorflow.nn.max_pool", "numpy.zeros", "tensorflow.app.run", "tensorflow.reshape", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.bias_add", "tensorflow.train.MomentumOptimizer", "tensorflow.global_variables_initializer", "tensorflow.nn.conv2d", "tensorflow.matmul", "tensorflow.cast", "tensorflow.argmax", "tensorflow.Session" ] ]
schrammlb2/policy-guided-sst
[ "8dce6619b9c771c39915c60fe9c54270ea1e621e" ]
[ "HER_mod/rl_modules/get_path_costs.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nimport os\nfrom scipy import stats\nfrom HER_mod.rl_modules.tsp import generate_path\nfrom HER_mod.rl_modules.hyperparams import NUM_GOALS, NUM_AGENTS\n\ngd_step_list = [0,2,5, 10, 20, 40]\n# NUM_AGENTS = 3\nN=200\n\ndef get_path_costs(train_pos_agent, train_vel_agent, perm_search=True):\n pos_run_time_list = []\n vel_run_time_list = []\n # gd_step_list = [0,5,10]\n num_agents = NUM_AGENTS\n num_goals=NUM_GOALS\n n=N\n # gd_step_list = [0,1]\n # num_agents = 2\n # num_goals=2\n # n=2\n pos_time_list = []\n vel_time_list = []\n for _ in range(num_agents):\n pos_agent = train_pos_agent()\n vel_agent = train_vel_agent()\n pos_agent_time_list = []\n vel_agent_time_list = []\n for i in range(n):\n # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]\n # pos = np.random.rand(2)*2-1\n\n goals = generate_path(num_goals + 1)\n pos = goals[0]\n goals = goals[1:-1]\n # pos_agent_time_list = []\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.find_shortest_path(pos, goals, gd_steps=0, perm_search=perm_search)\n pos_test_time_list = [len(min_trajectory)]*len(gd_step_list)\n pos_agent_time_list.append(pos_test_time_list)\n\n\n vel_test_time_list = []\n for gd_steps in gd_step_list:\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, perm_search=perm_search)\n vel_test_time_list.append(len(min_trajectory))\n vel_agent_time_list.append(vel_test_time_list)\n \n pos_time_list.append(pos_agent_time_list)\n vel_time_list.append(vel_agent_time_list)\n \n vel_time_list = np.array(vel_time_list).squeeze()\n pos_time_list = np.array(pos_time_list).squeeze()\n\n relative_time_change = (vel_time_list-pos_time_list)/pos_time_list\n relative_time_change = np.mean(relative_time_change, axis=1)\n\n try:\n pickle.dump(vel_time_list, open(\"velocity_target.pkl\", 'wb'))\n pickle.dump(pos_time_list, open(\"no_velocity_target.pkl\", 'wb'))\n pickle.dump(relative_time_change, open(\"relative_time_change.pkl\", 'wb'))\n except:\n print(\"pickle failure\")\n import pdb\n pdb.set_trace()\n\n mean = relative_time_change.mean(axis=0)\n t_score = stats.t.ppf(.975, num_agents)\n ci = t_score*relative_time_change.std(axis=0)/(num_agents**.5)\n steps = np.array(gd_step_list)\n\n plt.plot(steps, mean)\n plt.fill_between(steps, mean+ci, mean-ci, alpha=.4)\n plt.xlabel(\"Gradient steps\")\n plt.ylabel(\"Relative Improvement vs standard HER\")\n plt.title(\"Relative Improvement\")\n plt.savefig(os.path.join('results', \"Relative Improvement\" + '.png'))\n plt.close()\n # import pdb\n # pdb.set_trace()\n\n\n\n# def method_comparison(train_pos_agent, train_vel_agent):\n# # method_list = ['random search', \"gradient descent\", \"gradient descent (40 steps)\", \"random\", \"0 velocity target\"]\n# method_list = ['random search', \"gradient descent\", \"random\", \"0 velocity target\"]\n\n# method_runtime_dict = {'greedy': []}\n# for method in method_list:\n# method_runtime_dict[method] = [] \n\n\n# num_agents = NUM_AGENTS\n# num_goals=NUM_GOALS\n# n=N\n\n# pos_time_list = []\n# vel_time_list = []\n# for _ in range(num_agents):\n# pos_agent = train_pos_agent()\n# vel_agent = train_vel_agent()\n\n# for method in method_runtime_dict.keys():\n# method_runtime_dict[method].append([])\n\n# for i in range(n):\n# # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]\n# # pos = np.random.rand(2)*2-1\n# goals = generate_path(num_goals + 1)\n# pos = 
goals[0]\n# goals = goals[1:-1]\n# # pos_agent_time_list = []\n# min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.select_path(pos, goals, method=\"0 velocity target\")\n# # pos_test_time_list = [len(min_trajectory)]*len(gd_step_list)\n# method_runtime_dict['greedy'][-1].append(len(min_trajectory))\n\n\n# # vel_test_time_list = []\n# for method in method_list:\n# min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.select_path(pos, goals, method=method)\n# method_runtime_dict[method][-1].append(len(min_trajectory))\n# # vel_agent_time_list.append(vel_test_time_list)\n\n\n# greedy = method_runtime_dict['greedy']\n# method_runtime_dict = {method: np.array(method_runtime_dict[method]) for method in method_runtime_dict.keys()}\n# performance_dict = {method: (method_runtime_dict[method].mean(), 2*(method_runtime_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_runtime_dict.keys()}\n# relative_time_dict = {method: (method_runtime_dict[method] - greedy)/greedy for method in method_list}\n# improvement_dict = {method: (relative_time_dict[method].mean(), 2*(relative_time_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_list}\n\n\n# performance_list = [performance_dict[m][0] for m in method_runtime_dict.keys()]\n# performance_ci_list = [performance_dict[m][1] for m in method_runtime_dict.keys()]\n# relative_time_list = [improvement_dict[m][0] for m in method_list]\n# relative_time_ci_list = [improvement_dict[m][1] for m in method_list]\n\n\n\n\n# plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))\n# plt.xlabel(\"Method\")\n# plt.ylabel('Time to complete')\n# plt.title('Comparison of velocity target-setting methods')\n# plt.bar(range(len(performance_list)), performance_list, yerr=performance_ci_list) \n# plt.savefig(os.path.join('results', \"Method comparison -- Performance\" + '.png'))\n# plt.close()\n\n\n# plt.xticks(range(len(method_list)), method_list)\n# plt.xlabel(\"Method\")\n# plt.ylabel('Cost reduction over greedy baseline')\n# plt.title('Comparison of velocity target-setting methods')\n# plt.bar(range(len(relative_time_list)), relative_time_list, yerr=relative_time_ci_list) \n# plt.savefig(os.path.join('results', \"Method comparison -- Relative Improvement\" + '.png'))\n# plt.close()\n\n\n\ndef method_comparison(train_pos_agent, train_vel_agent):\n method_list = ['random search', \"gradient descent\", \"gradient descent (40 steps)\", \"random\", \"0 velocity target\"]\n # method_list = ['random search', \"gradient descent\", \"random\", \"0 velocity target\"]\n\n method_runtime_dict = {'greedy': []}\n for method in method_list:\n method_runtime_dict[method] = [] \n\n\n num_agents = NUM_AGENTS\n num_goals=NUM_GOALS\n n=N\n\n pos_time_list = []\n vel_time_list = []\n\n failed_counter_dict = {'greedy': 0}\n for method in method_list:\n failed_counter_dict[method] = 0\n\n\n for _ in range(num_agents):\n pos_agent = train_pos_agent()\n vel_agent = train_vel_agent()\n\n for method in method_runtime_dict.keys():\n method_runtime_dict[method].append([])\n\n for i in range(n):\n # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]\n # pos = np.random.rand(2)*2-1\n goals = generate_path(num_goals + 1)\n pos = goals[0]\n goals = goals[1:-1]\n # pos_agent_time_list = []\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.select_path(pos, goals, method=\"0 velocity target\")\n # pos_test_time_list = 
[len(min_trajectory)]*len(gd_step_list)\n if successful: \n method_runtime_dict['greedy'][-1].append(len(min_trajectory))\n else: \n method_runtime_dict['greedy'][-1].append(\"NULL\")\n failed_counter_dict['greedy'] += 1\n\n\n # vel_test_time_list = []\n for method in method_list:\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.select_path(pos, goals, method=method)\n if successful: \n method_runtime_dict[method][-1].append(len(min_trajectory))\n else: \n method_runtime_dict[method][-1].append(\"NULL\")\n failed_counter_dict[method] += 1\n # vel_agent_time_list.append(vel_test_time_list)\n\n success_rates = {method: 1-failed_counter_dict[method]/(num_agents*n) for method in failed_counter_dict.keys()}\n\n greedy = method_runtime_dict['greedy']\n agent_performance_dict = {}\n mean_performance_dict = {}\n ci_performance_dict = {}\n\n improvement_dict = {}\n mean_improvement_dict = {}\n ci_improvement_dict = {}\n t_score = stats.t.ppf(.975, num_agents)\n\n\n for method in method_runtime_dict.keys(): \n agent_performance_dict[method] = [[time for time in agent_list if time != \"NULL\"] for agent_list in method_runtime_dict[method]]\n agent_performance_dict[method] = [sum(agent_list)/len(agent_list) for agent_list in agent_performance_dict[method]]\n mean = sum(agent_performance_dict[method])/len(agent_performance_dict[method])\n mean_performance_dict[method] = mean\n ci_performance_dict[method] = t_score*sum([(v-mean)**2 for v in agent_performance_dict[method]])**.5/len(agent_performance_dict[method])\n\n improvement_list = []\n mean_list = []\n for agent_ind in range(num_agents):\n agent_list = method_runtime_dict[method][agent_ind]\n greedy_list = greedy[agent_ind]\n improvement_list.append([(agent_list[i] - greedy_list[i])/greedy_list[i] for i in range(n) if (agent_list[i] != \"NULL\" and greedy_list[i]!= \"NULL\")])\n mean_list.append(sum(improvement_list[agent_ind])/len(improvement_list[agent_ind]))\n\n mean = sum(mean_list)/len(mean_list)\n mean_improvement_dict[method] = mean\n ci_improvement_dict[method] = t_score*sum([(v-mean)**2 for v in mean_list])**.5/len(mean_list)\n\n # agent_improvement_dict[method] = [[(time - greedy_time)/greedy_time for time in agent_list if time != \"NULL\"] for agent_list in method_runtime_dict[method]]\n # agent_performance_dict[method] = [sum(agent_list)/len(agent_list) for agent_list in agent_performance_dict[method]]\n # mean_performance_dict[method] = sum(agent_performance_dict[method])/len(agent_performance_dict[method])\n # ci_performance_dict[method] = 2*sum([(v-mean)**2 for v in agent_performance_dict[method]])**.5/len(agent_performance_dict[method])\n # method_runtime_dict = {method: np.array(method_runtime_dict[method]) for method in method_runtime_dict.keys()}\n\n # mean_performance_dict = {method: method_runtime_dict[method] for method in method_runtime_dict.keys()}\n # relative_time_dict = {method: (method_runtime_dict[method] - greedy)/greedy for method in method_list}\n # improvement_dict = {method: (relative_time_dict[method].mean(), 2*(relative_time_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_list}\n\n\n # greedy = method_runtime_dict['greedy']\n # method_runtime_dict = {method: np.array(method_runtime_dict[method]) for method in method_runtime_dict.keys()}\n # performance_dict = {method: (method_runtime_dict[method].mean(), 2*(method_runtime_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_runtime_dict.keys()}\n # relative_time_dict = {method: 
(method_runtime_dict[method] - greedy)/greedy for method in method_list}\n # improvement_dict = {method: (relative_time_dict[method].mean(), 2*(relative_time_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_list}\n\n\n performance_list = [mean_performance_dict[m] for m in method_runtime_dict.keys()]\n performance_ci_list = [ci_performance_dict[m] for m in method_runtime_dict.keys()]\n relative_time_list = [mean_improvement_dict[m] for m in method_list]\n relative_time_ci_list = [ci_improvement_dict[m] for m in method_list]\n\n sr_list = [success_rates[m] for m in method_runtime_dict.keys()]#method_list]\n\n\n # plt.xticks(range(len(method_list)), method_list)\n plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))\n plt.xlabel(\"Method\")\n plt.ylabel('Success rate')\n plt.title('Comparison of velocity target-setting methods')\n plt.bar(range(len(sr_list)), sr_list) \n plt.savefig(os.path.join('results', \"Method comparison -- Success Rate\" + '.png'))\n plt.close()\n\n\n plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))\n plt.xlabel(\"Method\")\n plt.ylabel('Time to complete')\n plt.title('Comparison of velocity target-setting methods')\n plt.bar(range(len(performance_list)), performance_list, yerr=performance_ci_list) \n plt.savefig(os.path.join('results', \"Method comparison -- Performance\" + '.png'))\n plt.close()\n\n\n plt.xticks(range(len(method_list)), method_list)\n plt.xlabel(\"Method\")\n plt.ylabel('Cost reduction over greedy baseline')\n plt.title('Comparison of velocity target-setting methods')\n plt.bar(range(len(relative_time_list)), relative_time_list, yerr=relative_time_ci_list) \n plt.savefig(os.path.join('results', \"Method comparison -- Relative Improvement\" + '.png'))\n plt.close()\n\n\n\n\ndef get_random_search_costs(train_vel_agent, perm_search=True):\n pos_run_time_list = []\n vel_run_time_list = []\n # gd_step_list = [0,5,10]\n num_agents = NUM_AGENTS\n num_goals=NUM_GOALS\n n=N\n # gd_step_list = [0,1]\n # num_agents = 2\n # num_goals=2\n # n=2\n rand_time_list = []\n gd_time_list = []\n for _ in range(num_agents):\n vel_agent = train_vel_agent()\n rand_search_time_list = []\n gd_search_time_list = []\n for i in range(n):\n # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]\n # pos = np.random.rand(2)*2-1\n goals = generate_path(num_goals + 1)\n pos = goals[0]\n goals = goals[1:-1]\n\n rand_test_time_list = []\n gd_test_time_list = []\n for gd_steps in gd_step_list:\n # min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_start=True, perm_search=perm_search)\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_start=False, perm_search=perm_search)\n print(\"GD: \" + str(min_time))\n gd_test_time_list.append(len(min_trajectory))\n\n min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_search=True, perm_search=perm_search)\n print(\"random_search: \" + str(min_time))\n rand_test_time_list.append(len(min_trajectory))\n \n rand_search_time_list.append(rand_test_time_list)\n gd_search_time_list.append(gd_test_time_list)\n \n rand_time_list.append(rand_search_time_list)\n gd_time_list.append(gd_search_time_list)\n \n rand_time_list = np.array(rand_time_list).squeeze()\n gd_time_list = 
np.array(gd_time_list).squeeze()\n # best = np.minimum(rand_time_list.min(axis=2),gd_time_list.min(axis=2))\n\n relative_time_change = (gd_time_list-rand_time_list)/rand_time_list\n relative_time_change = np.mean(relative_time_change, axis=1)\n\n # try:\n # pickle.dump(vel_time_list, open(\"velocity_target.pkl\", 'wb'))\n # pickle.dump(pos_time_list, open(\"no_velocity_target.pkl\", 'wb'))\n # pickle.dump(relative_time_change, open(\"relative_time_change.pkl\", 'wb'))\n # except:\n # print(\"pickle failure\")\n # import pdb\n # pdb.set_trace()\n\n mean = relative_time_change.mean(axis=0)\n ci = 2*relative_time_change.std(axis=0)/(num_agents**.5)\n steps = np.array(gd_step_list)\n\n plt.plot(steps, mean)\n plt.fill_between(steps, mean+ci, mean-ci, alpha=.4)\n plt.xlabel(\"Gradient steps\")\n plt.ylabel(\"Relative Improvement vs random search\")\n plt.title(\"Relative Improvement vs random search\")\n plt.savefig(os.path.join('results', \"Improvement vs random search\" + '.png'))\n plt.close()\n\n\n t_score = stats.t.ppf(.975, num_agents)\n rands = rand_time_list.mean(axis=1)\n rand_mean = rands.mean(axis=0)\n rand_ci = t_score*rands.std(axis=0)/(num_agents**.5)\n\n gds = gd_time_list.mean(axis=1)\n gd_mean = gds.mean(axis=0)\n gd_ci = t_score*gds.std(axis=0)/(num_agents**.5)\n\n plt.plot(steps, rand_mean, color='red', label='Random Search')\n plt.fill_between(steps, rand_mean+rand_ci, rand_mean-rand_ci, alpha=.4, color='red')\n plt.plot(steps, gd_mean, color='blue', label='Gradient Descent')\n plt.fill_between(steps, gd_mean+gd_ci, gd_mean-gd_ci, alpha=.4, color='blue')\n plt.legend()\n plt.xlabel(\"Gradient steps\")\n plt.ylabel(\"Relative Improvement vs random search\")\n plt.title(\"Relative Improvement vs random search\")\n plt.savefig(os.path.join('results', \"Gradient Descent vs random search\" + '.png'))\n plt.close()\n" ]
[ [ "matplotlib.pyplot.legend", "scipy.stats.t.ppf", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.xlabel", "numpy.mean" ] ]
StanczakDominik/arviz
[ "ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287" ]
[ "arviz/plots/backends/matplotlib/distplot.py" ]
[ "\"\"\"Matplotlib distplot.\"\"\"\nimport warnings\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom . import backend_show\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import matplotlib_kwarg_dealiaser\nfrom ....numeric_utils import get_bins\n\n\ndef plot_dist(\n values,\n values2,\n color,\n kind,\n cumulative,\n label,\n rotated,\n rug,\n bw,\n quantiles,\n contour,\n fill_last,\n textsize,\n plot_kwargs,\n fill_kwargs,\n rug_kwargs,\n contour_kwargs,\n contourf_kwargs,\n pcolormesh_kwargs,\n hist_kwargs,\n ax,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib distplot.\"\"\"\n if backend_kwargs is not None:\n warnings.warn(\n (\n \"Argument backend_kwargs has not effect in matplotlib.plot_dist\"\n \"Supplied value won't be used\"\n )\n )\n backend_kwargs = None\n if ax is None:\n ax = plt.gca()\n\n if kind == \"hist\":\n ax = _histplot_mpl_op(\n values=values, values2=values2, rotated=rotated, ax=ax, hist_kwargs=hist_kwargs\n )\n\n elif kind == \"kde\":\n plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, \"plot\")\n plot_kwargs.setdefault(\"color\", color)\n legend = label is not None\n\n ax = plot_kde(\n values,\n values2,\n cumulative=cumulative,\n rug=rug,\n label=label,\n bw=bw,\n quantiles=quantiles,\n rotated=rotated,\n contour=contour,\n legend=legend,\n fill_last=fill_last,\n textsize=textsize,\n plot_kwargs=plot_kwargs,\n fill_kwargs=fill_kwargs,\n rug_kwargs=rug_kwargs,\n contour_kwargs=contour_kwargs,\n contourf_kwargs=contourf_kwargs,\n pcolormesh_kwargs=pcolormesh_kwargs,\n ax=ax,\n backend=\"matplotlib\",\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend_show(show):\n plt.show()\n\n return ax\n\n\ndef _histplot_mpl_op(values, values2, rotated, ax, hist_kwargs):\n \"\"\"Add a histogram for the data to the axes.\"\"\"\n if values2 is not None:\n raise NotImplementedError(\"Insert hexbin plot here\")\n\n bins = hist_kwargs.pop(\"bins\")\n if bins is None:\n bins = get_bins(values)\n ax.hist(np.asarray(values).flatten(), bins=bins, **hist_kwargs)\n\n if rotated:\n ax.set_yticks(bins[:-1])\n else:\n ax.set_xticks(bins[:-1])\n if hist_kwargs.get(\"label\") is not None:\n ax.legend()\n return ax\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.show", "numpy.asarray" ] ]
zhaocy14/SmartWalker
[ "b025a7b4a2b305838a22fe4e6116ddb951c4d7bf" ]
[ "Sensors/softskin.py" ]
[ "import serial\nimport serial.tools.list_ports\nimport numpy as np\nimport math\nimport threading\nimport re\nimport os\nimport sys\nimport time\nimport matplotlib.pyplot as plt\n\npwd = os.path.abspath(os.path.abspath(__file__))\nfather_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + \"..\")\nsys.path.append(father_path)\ndata_path = os.path.abspath(\n os.path.dirname(os.path.abspath(__file__)) + os.path.sep + \"..\" +\n os.path.sep + \"data\")\n\ndef print_serial(port):\n print(\"---------------[ %s ]---------------\" % port.name)\n print(\"Path: %s\" % port.device)\n print(\"Descript: %s\" % port.description)\n print(\"HWID: %s\" % port.hwid)\n if not None == port.manufacturer:\n print(\"Manufacture: %s\" % port.manufacturer)\n if not None == port.product:\n print(\"Product: %s\" % port.product)\n if not None == port.interface:\n print(\"Interface: %s\" % port.interface)\n print()\n\n\ndef detect_serials(location=\"1-1.1:1.0\", vid=0x10c4, pid=0xea60):\n ports = serial.tools.list_ports.comports()\n for port in ports:\n print_serial(port)\n\n if port.location.__contains__(location):\n port_path = port.device\n return port_path\n else:\n print(\"Cannot find the target device: %s\" % location)\n return None\n\n\nclass SoftSkin(object):\n\n def __init__(self, is_STM32: bool = True):\n\n port_name = detect_serials(\"1-1.3:1.0\") # Arduino Mega 2560 ttyACM0\n baud_rate = 115200\n print(port_name, baud_rate)\n self.serial = serial.Serial(port_name, baud_rate, timeout=None)\n self.pwd = os.path.abspath(os.path.abspath(__file__))\n self.father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + \"..\")\n self.serial = serial.Serial(port_name, baud_rate, timeout=None)\n self.raw_data = [] # 保存一帧数据\n self.base_data = [] # 建立一组基准值用于初始化\n self.temp_data = []\n self.port_num = 32\n self.average_length = 10\n self.average_buffer = np.zeros((self.average_length, self.port_num))\n\n # detect abnormal signal\n self.max_pressure = 0\n self.safe_change_rate = 10\n self.emergency_change_rate = 50\n self.detect_length = 10\n self.detect_buffer = np.zeros((self.detect_length, self.port_num))\n self.skin_unlock_event = threading.Event()\n self.skin_unlock_event.clear()\n\n self.build_base_line_data()\n pass\n\n def read_data(self, is_shown=1):\n try:\n one_line_data = self.serial.readline().decode(\"utf-8\")\n # print(one_line_data)\n one_line_data = one_line_data.strip('SS')\n one_line_data = one_line_data.strip('\\n')\n one_line_data = one_line_data.strip('\\r')\n one_line_data = one_line_data.split('|')\n # print(one_line_data)\n if is_shown == 1:\n print(one_line_data)\n if len(one_line_data) == self.port_num:\n one_line_data = list(map(float, one_line_data))\n one_line_data = list(map(int, one_line_data))\n self.raw_data = one_line_data\n # print(self.raw_data, type(self.raw_data), type(self.raw_data[0]))\n except BaseException as be:\n print(\"Data Error:\", be)\n\n def build_base_line_data(self, initial_size=10):\n \"\"\"\n expired, no use\n 1.建立一组基准数值\n 检测异常值\n 取平均值\n :return:\n not in use because the original signals are stable enough\n \"\"\"\n base_list = []\n for i in range(initial_size):\n self.read_data(0)\n if len(self.raw_data) == self.port_num:\n temp_raw_data = self.raw_data\n base_list += temp_raw_data\n mean_base_list = np.array(base_list).reshape([-1, self.port_num])\n add_col = np.ones(mean_base_list.shape[0]).reshape([1, -1])\n mean_base_list = add_col.dot(mean_base_list) / mean_base_list.shape[0]\n self.base_data = mean_base_list.tolist()[0]\n self.base_data = 
list(map(lambda x: int(x) - 1, self.base_data))\n print(\"base line data: \", self.base_data)\n pass\n\n def read_and_record(self, record=False, show=False, plot=False, plot_num=30):\n file_path = data_path + os.path.sep + \"Softskin.txt\"\n plot_array = np.zeros((plot_num, self.port_num))\n if record:\n file = open(file_path, 'w')\n while True:\n try:\n # self.serial.flushInput()\n self.read_data(0)\n if len(self.raw_data) == len(self.base_data):\n temp_data = np.array(self.raw_data) - np.array(self.base_data)\n if show:\n print(temp_data)\n print(self.max_pressure)\n if record:\n time_index = time.time()\n write_data = temp_data.tolist()\n write_data.insert(0, time_index)\n file.write(str(write_data) + '\\n')\n file.flush()\n self.temp_data = temp_data\n self.max_pressure = self.temp_data.max()\n self.detect_buffer[0:-1, :] = self.detect_buffer[1:self.detect_length, :]\n self.detect_buffer[-1, :] = np.array(self.temp_data)\n\n if plot:\n # plt.ion()\n plot_array[0:plot_num - 1, :] = plot_array[1:plot_num, :]\n plot_array[plot_num - 1, :] = np.array(temp_data)\n plt.clf()\n plt.xlabel('Time')\n plt.ylabel('pressure')\n plt.ylim((-10, 270))\n plt.plot(range(0, plot_num), plot_array)\n # plt.ioff()\n # plt.show()\n # plt.draw()\n plt.pause(0.0000000001)\n except BaseException as be:\n print(\"Data Error:\", be)\n\n def update_from_STM32(self, STM32_data: np.ndarray):\n try:\n self.raw_data = STM32_data\n except:\n pass\n\n def unlock(self):\n while True:\n change_rate = self.detect_buffer[-1, :] - self.detect_buffer[0, :]\n change_rate = change_rate.max()\n if self.safe_change_rate <= change_rate < self.emergency_change_rate:\n print(\"unlock!\")\n break\n time.sleep(0.1)\n\n\nif __name__ == '__main__':\n skin = SoftSkin()\n # skin.build_base_line_data()\n thread_reading = threading.Thread(target=skin.read_and_record, args=())\n\n time.sleep(1)\n thread_reading.start()\n\n skin.unlock()\n\n" ]
[ [ "numpy.ones", "matplotlib.pyplot.pause", "numpy.zeros", "matplotlib.pyplot.clf", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
dvamossy/EmTract
[ "68a00e3d63fbc2c401b0d2b297bf96ffb75940e8" ]
[ "emtract/model_inference.py" ]
[ "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nfrom emtract.model import Model, ModelType\nimport pandas as pd\n\n\nclass ModelInference:\n\n MODEL_BASE_PATH = 'build/models/'\n DATA_BASE_PATH = './emtract/data/'\n\n def __init__(self, model_type):\n if model_type == 'twitter':\n self.model = Model(ModelType.TWITTER)\n else:\n self.model = Model(ModelType.STOCK_TWITS)\n\n def inference(self, text):\n return self.model.predict([text])\n\n def file_inference(self, file_name, output):\n df = pd.read_csv(file_name, header=None)\n predictions = self.model.predict(df.iloc[:, 0].values)\n predictions.to_csv(output, index=False)\n" ]
[ [ "pandas.read_csv" ] ]
pleiades-s/PyTorch-tutorials-kr
[ "3d749ea2fe67363b5d46340b742308b744fa0419" ]
[ "docs/_downloads/d923ca53b1bfbeb3c222ae46d65d485e/transfer_learning_tutorial.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n컴퓨터 비전(Vision)을 위한 전이학습(Transfer Learning)\n=======================================================\n**Author**: `Sasank Chilamkurthy <https://chsasank.github.io>`_\n **번역**: `박정환 <http://github.com/9bow>`_\n\n이 튜토리얼에서는 전이학습(Transfer Learning)을 이용하여 이미지 분류를 위한\n합성곱 신경망을 어떻게 학습시키는지 배워보겠습니다. 전이학습에 대해서는\n`CS231n 노트 <http://cs231n.github.io/transfer-learning/>`__ 에서 더 많은 내용을\n읽어보실 수 있습니다.\n\n위 노트를 인용해보면,\n\n 실제로 충분한 크기의 데이터셋을 갖추기는 상대적으로 드물기 때문에,\n (무작위 초기화를 통해) 맨 처음부터 합성곱 신경망(Convolutional\n Network) 전체를 학습하는 사람은 매우 적습니다. 대신, 매우 큰 데이터셋(예.\n 100가지 분류에 대해 120만개의 이미지가 포함된 ImageNet)에서 합성곱\n 신경망(ConvNet)을 미리 학습한 후, 이 합성곱 신경망을 관심있는 작업\n 을 위한 초기 설정 또는 고정된 특징 추출기(fixed feature extractor)로 사용합니다.\n\n이러한 전이학습 시나리오의 주요한 2가지는 다음과 같습니다:\n\n- **합성곱 신경망의 미세조정(finetuning)**: 무작위 초기화 대신, 신경망을\n ImageNet 1000 데이터셋 등으로 미리 학습한 신경망으로 초기화합니다. 학습의 나머지\n 과정들은 평상시와 같습니다.\n- **고정된 특징 추출기로써의 합성곱 신경망**: 여기서는 마지막에 완전히 연결\n 된 계층을 제외한 모든 신경망의 가중치를 고정합니다. 이 마지막의 완전히 연결된\n 계층은 새로운 무작위의 가중치를 갖는 계층으로 대체되어 이 계층만 학습합니다.\n\n\"\"\"\n# License: BSD\n# Author: Sasank Chilamkurthy\n\nfrom __future__ import print_function, division\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\n\nplt.ion() # 대화형 모드\n\n######################################################################\n# 데이터 불러오기\n# ---------------\n#\n# 데이터를 불러오기 위해 torchvision과 torch.utils.data 패키지를 사용하겠습니다.\n#\n# 여기서 풀고자 하는 문제는 **개미** 와 **벌** 을 분류하는 모델을 학습하는 것입니다.\n# 개미와 벌 각각의 학습용 이미지는 대략 120장 정도 있고, 75개의 검증용 이미지가\n# 있습니다. 일반적으로 맨 처음부터 학습을 한다면 이는 일반화하기에는 아주 작은\n# 데이터셋입니다. 하지만 우리는 전이학습을 할 것이므로, 일반화를 제법 잘 할 수 있을\n# 것입니다.\n#\n# 이 데이터셋은 ImageNet의 아주 작은 일부입니다.\n#\n# .. 
Note ::\n# 데이터를 `여기 <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_\n# 에서 다운로드 받아 현재 디렉토리에 압축을 푸십시오.\n\n# 학습을 위해 데이터 증가(augmentation) 및 일반화(normalization)\n# 검증을 위한 일반화\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n}\n\ndata_dir = 'data/hymenoptera_data'\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in ['train', 'val']}\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\n shuffle=True, num_workers=4)\n for x in ['train', 'val']}\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\nclass_names = image_datasets['train'].classes\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n######################################################################\n# 일부 이미지 시각화하기\n# ^^^^^^^^^^^^^^^^^^^^^^^^^\n# 데이터 증가를 이해하기 위해 일부 학습용 이미지를 시각화해보겠습니다.\n\ndef imshow(inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.pause(0.001) # 갱신이 될 때까지 잠시 기다립니다.\n\n\n# 학습 데이터의 배치를 얻습니다.\ninputs, classes = next(iter(dataloaders['train']))\n\n# 배치로부터 격자 형태의 이미지를 만듭니다.\nout = torchvision.utils.make_grid(inputs)\n\nimshow(out, title=[class_names[x] for x in classes])\n\n\n######################################################################\n# 모델 학습하기\n# --------------\n#\n# 이제 모델을 학습하기 위한 일반 함수를 작성해보겠습니다. 
여기서는 다음 내용들을\n# 설명합니다:\n#\n# - 학습율(learning rate) 관리(scheduling)\n# - 최적의 모델 구하기\n#\n# 아래에서 ``scheduler`` 매개변수는 ``torch.optim.lr_scheduler`` 의 LR 스케쥴러\n# 객체(Object)입니다.\n\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n # 각 에폭(epoch)은 학습 단계와 검증 단계를 갖습니다.\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # 모델을 학습 모드로 설정\n else:\n model.eval() # 모델을 평가 모드로 설정\n\n running_loss = 0.0\n running_corrects = 0\n\n # 데이터를 반복\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # 매개변수 경사도를 0으로 설정\n optimizer.zero_grad()\n\n # 순전파\n # 학습 시에만 연산 기록을 추적\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # 학습 단계인 경우 역전파 + 최적화\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # 통계\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n if phase == 'train':\n scheduler.step()\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # 모델을 깊은 복사(deep copy)함\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # 가장 나은 모델 가중치를 불러옴\n model.load_state_dict(best_model_wts)\n return model\n\n\n######################################################################\n# 모델 예측값 시각화하기\n# ^^^^^^^^^^^^^^^^^^^^^^^\n#\n# 일부 이미지에 대한 예측값을 보여주는 일반화된 함수입니다.\n#\n\ndef visualize_model(model, num_images=6):\n was_training = model.training\n model.eval()\n images_so_far = 0\n fig = plt.figure()\n\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(dataloaders['val']):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n for j in range(inputs.size()[0]):\n images_so_far += 1\n ax = plt.subplot(num_images//2, 2, images_so_far)\n ax.axis('off')\n ax.set_title('predicted: {}'.format(class_names[preds[j]]))\n imshow(inputs.cpu().data[j])\n\n if images_so_far == num_images:\n model.train(mode=was_training)\n return\n model.train(mode=was_training)\n\n######################################################################\n# 합성곱 신경망 미세조정(finetuning)\n# ----------------------------------\n#\n# 미리 학습한 모델을 불러온 후 마지막의 완전히 연결된 계층을 초기화합니다.\n#\n\nmodel_ft = models.resnet18(pretrained=True)\nnum_ftrs = model_ft.fc.in_features\n# 여기서 각 출력 샘플의 크기는 2로 설정합니다.\n# 또는, nn.Linear(num_ftrs, len (class_names))로 일반화할 수 있습니다.\nmodel_ft.fc = nn.Linear(num_ftrs, 2)\n\nmodel_ft = model_ft.to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\n# 모든 매개변수들이 최적화되었는지 관찰\noptimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\n\n# 7 에폭마다 0.1씩 학습율 감소\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)\n\n######################################################################\n# 학습 및 평가하기\n# ^^^^^^^^^^^^^^^^^^\n#\n# CPU에서는 15-25분 가량, GPU에서는 1분도 이내의 시간이 걸립니다.\n#\n\nmodel_ft = 
train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,\n num_epochs=25)\n\n######################################################################\n#\n\nvisualize_model(model_ft)\n\n\n######################################################################\n# 고정된 특징 추출기로써의 합성곱 신경망\n# ---------------------------------------\n#\n# 이제, 마지막 계층을 제외한 신경망의 모든 부분을 고정해야 합니다.\n# ``requires_grad == False`` 로 설정하여 매개변수를 고정하여 ``backward()`` 중에\n# 경사도가 계산되지 않도록 해야합니다.\n#\n# 이에 대한 문서는\n# `여기 <http://pytorch.org/docs/notes/autograd.html#excluding-subgraphs-from-backward>`__\n# 에서 확인할 수 있습니다.\n#\n\nmodel_conv = torchvision.models.resnet18(pretrained=True)\nfor param in model_conv.parameters():\n param.requires_grad = False\n\n# 새로 생성된 모듈의 매개변수는 기본값이 requires_grad=True 임\nnum_ftrs = model_conv.fc.in_features\nmodel_conv.fc = nn.Linear(num_ftrs, 2)\n\nmodel_conv = model_conv.to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\n# 이전과는 다르게 마지막 계층의 매개변수들만 최적화되는지 관찰\noptimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)\n\n# 7 에폭마다 0.1씩 학습율 감소\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n\n\n######################################################################\n# 학습 및 평가하기\n# ^^^^^^^^^^^^^^^^^\n#\n# CPU에서 실행하는 경우 이전과 비교했을 때 약 절반 가량의 시간만이 소요될 것입니다.\n# 이는 대부분의 신경망에서 경사도를 계산할 필요가 없기 때문입니다. 하지만,\n# 순전파는 계산이 필요합니다.\n#\n\nmodel_conv = train_model(model_conv, criterion, optimizer_conv,\n exp_lr_scheduler, num_epochs=25)\n\n######################################################################\n#\n\nvisualize_model(model_conv)\n\nplt.ioff()\nplt.show()\n\n######################################################################\n# 더 배워볼 내용\n# -----------------\n#\n# 전이학습의 응용 사례(application)들을 더 알아보려면,\n# :doc:`/intermediate/quantized_transfer_learning_tutorial` 을 참조해보세요.\n#\n#\n" ]
[ [ "torch.utils.data.DataLoader", "matplotlib.pyplot.ioff", "torch.no_grad", "matplotlib.pyplot.imshow", "torch.cuda.is_available", "torch.max", "matplotlib.pyplot.pause", "torch.set_grad_enabled", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "torch.optim.lr_scheduler.StepLR", "matplotlib.pyplot.ion", "torch.sum", "torch.nn.Linear", "torch.nn.CrossEntropyLoss", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.clip", "numpy.array" ] ]
JDMusc/Online-Bullying-Image-Classifcation
[ "9196c60c554cf160d68cb9e9c41fda124abebf63" ]
[ "modelEpochs.py" ]
[ "import copy\nimport numpy as np\nfrom numpy import log10\nimport os\nfrom toolz import pipe as p\n\nfrom tensorboardX import SummaryWriter\n\nimport torch\nimport torch.nn as nn\n\nimport numpy as np\n\nimport preprocessing as pp\n\n\ndef findParam(model, name_filter):\n if callable(name_filter):\n fn = name_filter\n else:\n name_filter = [name_filter] if type(name_filter) is str else name_filter\n fn = lambda param_name: all(\n component in param_name for component in name_filter)\n \n return [(pn, pv) for (pn, pv) in model.named_parameters() if fn(pn)]\n\n\ndef setParameterRequiresGrad(model, requires_grad = False, params = None):\n params = model.parameters() if params is None else params\n for param in params:\n param.requires_grad = requires_grad\n\n\ndef runEpochs(\n model, criterion, \n dataloaders, dataset_sizes, device, \n log_params_verbose, num_epochs,\n optimizer, scheduler, \n writer):\n\n \n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n prev_model_wts = best_model_wts\n for epoch in range(num_epochs):\n epoch_acc, model_wts = _run_epoch(\n model, \n criterion, dataloaders, dataset_sizes, device, \n epoch, log_params_verbose, num_epochs, \n optimizer, scheduler, writer)\n \n _log_coef_diffs(writer, epoch, prev_model_wts, model_wts)\n prev_model_wts = model_wts\n\n if epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = model_wts\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return (model, best_acc)\n\n\ndef viewParamsToBeUpdated(model):\n return [n for (n,p) in model.named_parameters() if p.requires_grad == True]\n\n\ndef add_graph_model(writer, model, dataloaders, device):\n inputs, classes = p(dataloaders['train'], iter, next)\n \n inputs = inputs.to(device)\n classes = classes.to(device)\n \n writer.add_graph(model, inputs)\n\n\ndef _run_epoch(model, \n criterion, dataloaders, dataset_sizes, device, \n epoch, log_params_verbose, num_epochs,\n optimizer, scheduler, writer):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n n_samples = {'train': 0, 'val': 0}\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n is_train = phase == 'train'\n\n if is_train:\n scheduler.step()\n model.train()\n else:\n model.eval()\n\n running_loss = 0.0\n running_corrects = 0\n\n for inputs, labels in dataloaders[phase]:\n n_samples[phase] = n_samples[phase] + len(labels)\n\n inputs = inputs.to(device)\n labels = labels.to(device)\n preds, loss = _take_step(\n model, criterion, optimizer, inputs, labels, is_train)\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n _log_epoch_phase_stats(writer, epoch, phase, epoch_loss, epoch_acc)\n\n if log_params_verbose:\n _log_model_params_verbose(writer, model, epoch, phase)\n\n # deep copy the model\n model_wts = copy.deepcopy(model.state_dict())\n\n _log_lr(writer, epoch, scheduler)\n print('# training samples')\n print(n_samples['train'])\n print('# val samples')\n print(n_samples['val'])\n \n return epoch_acc, model_wts\n\n\n\ndef _take_step(model, criterion, optimizer, inputs, labels, is_train):\n \n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(is_train):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # 
backward + optimize only if in training phase\n if is_train:\n loss.backward()\n optimizer.step()\n \n return preds, loss\n\n\ndef _add_scope(scope, k):\n return scope + '/' + k\n \n\ndef _add_scope_gen(scope):\n return lambda k: _add_scope(scope, k)\n\n\ndef _log_model_params_verbose(writer, model, run_num, scope, use_hist = False):\n def write(tag, param):\n fn = writer.add_histogram if use_hist else writer.add_scalar\n param = param if use_hist else param.abs().mean()\n return fn(tag, param, run_num)\n \n with torch.no_grad():\n for (name, param) in model.named_parameters():\n p(name, \n _add_scope_gen(scope),\n lambda tag: write(tag, param)\n )\n\n\ndef _log_lr(writer, epoch, scheduler):\n lr = p(scheduler.get_lr(), np.array)[0]\n p('lr', \n _add_scope_gen('lr'),\n lambda _: writer.add_scalar(_, lr, epoch)\n )\n p('log10_lr',\n _add_scope_gen('lr'),\n lambda _: writer.add_scalar(_, log10(lr), epoch)\n )\n\n\ndef _log_epoch_phase_stats(writer, epoch, scope, epoch_loss, epoch_acc): \n\n log_measure = lambda k, v: p(k,\n _add_scope_gen(scope),\n lambda _ : writer.add_scalar(_, v, epoch)\n )\n \n log_measure('loss', epoch_loss)\n log_measure('accuracy', epoch_acc)\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n scope, epoch_loss, epoch_acc))\n \n\ndef _log_coef_diffs(writer, epoch, prev_model_state, curr_model_state):\n def write(name, curr):\n diff = curr - prev_model_state[name]\n p(name,\n _add_scope_gen('params'),\n lambda _: writer.add_scalar(\n _ + '.diff', diff.abs().mean(), epoch)\n )\n\n with torch.no_grad():\n for name in curr_model_state:\n if ('weight' in name or 'bias' in name): \n write(name, curr_model_state[name])\n\n\n" ]
[ [ "torch.sum", "torch.set_grad_enabled", "torch.no_grad", "numpy.log10", "torch.max" ] ]
MrThiago/FaceForensics
[ "a815daa9ebb7c12240a4b7162c431af0e1b959fa" ]
[ "dataset/DeepFakes/faceswap-master/lib/training_data.py" ]
[ "import cv2\nimport numpy\nfrom random import shuffle\n\nfrom .utils import BackgroundGenerator\nfrom .umeyama import umeyama\n\nclass TrainingDataGenerator():\n def __init__(self, random_transform_args, coverage, scale=5, zoom=1): #TODO thos default should stay in the warp function\n self.random_transform_args = random_transform_args\n self.coverage = coverage\n self.scale = scale\n self.zoom = zoom\n\n def minibatchAB(self, images, batchsize):\n batch = BackgroundGenerator(self.minibatch(images, batchsize), 1)\n for ep1, warped_img, target_img in batch.iterator():\n yield ep1, warped_img, target_img\n\n # A generator function that yields epoch, batchsize of warped_img and batchsize of target_img\n def minibatch(self, data, batchsize):\n length = len(data)\n assert length >= batchsize, \"Number of images is lower than batch-size (Note that too few images may lead to bad training). # images: {}, batch-size: {}\".format(length, batchsize)\n epoch = i = 0\n shuffle(data)\n while True:\n size = batchsize\n if i+size > length:\n shuffle(data)\n i = 0\n epoch+=1\n rtn = numpy.float32([self.read_image(img) for img in data[i:i+size]])\n i+=size\n yield epoch, rtn[:,0,:,:,:], rtn[:,1,:,:,:] \n\n def color_adjust(self, img):\n return img / 255.0\n \n def read_image(self, fn):\n try:\n image = self.color_adjust(cv2.imread(fn))\n except TypeError:\n raise Exception(\"Error while reading image\", fn)\n \n image = cv2.resize(image, (256,256))\n image = self.random_transform( image, **self.random_transform_args )\n warped_img, target_img = self.random_warp( image, self.coverage, self.scale, self.zoom )\n \n return warped_img, target_img\n\n def random_transform(self, image, rotation_range, zoom_range, shift_range, random_flip):\n h, w = image.shape[0:2]\n rotation = numpy.random.uniform(-rotation_range, rotation_range)\n scale = numpy.random.uniform(1 - zoom_range, 1 + zoom_range)\n tx = numpy.random.uniform(-shift_range, shift_range) * w\n ty = numpy.random.uniform(-shift_range, shift_range) * h\n mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)\n mat[:, 2] += (tx, ty)\n result = cv2.warpAffine(\n image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE)\n if numpy.random.random() < random_flip:\n result = result[:, ::-1]\n return result\n\n # get pair of random warped images from aligned face image\n def random_warp(self, image, coverage, scale = 5, zoom = 1):\n assert image.shape == (256, 256, 3)\n range_ = numpy.linspace(128 - coverage//2, 128 + coverage//2, 5)\n mapx = numpy.broadcast_to(range_, (5, 5))\n mapy = mapx.T\n\n mapx = mapx + numpy.random.normal(size=(5,5), scale=scale)\n mapy = mapy + numpy.random.normal(size=(5,5), scale=scale)\n\n interp_mapx = cv2.resize(mapx, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32')\n interp_mapy = cv2.resize(mapy, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32')\n\n warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR)\n\n src_points = numpy.stack([mapx.ravel(), mapy.ravel() ], axis=-1)\n dst_points = numpy.mgrid[0:65*zoom:16*zoom,0:65*zoom:16*zoom].T.reshape(-1,2)\n mat = umeyama(src_points, dst_points, True)[0:2]\n\n target_image = cv2.warpAffine(image, mat, (64*zoom,64*zoom))\n\n return warped_image, target_image\n\ndef stack_images(images):\n def get_transpose_axes(n):\n if n % 2 == 0:\n y_axes = list(range(1, n - 1, 2))\n x_axes = list(range(0, n - 1, 2))\n else:\n y_axes = list(range(0, n - 1, 2))\n x_axes = list(range(1, n - 1, 2))\n return y_axes, x_axes, [n - 1]\n \n 
images_shape = numpy.array(images.shape)\n new_axes = get_transpose_axes(len(images_shape))\n new_shape = [numpy.prod(images_shape[x]) for x in new_axes]\n return numpy.transpose(\n images,\n axes=numpy.concatenate(new_axes)\n ).reshape(new_shape)\n" ]
[ [ "numpy.random.uniform", "numpy.random.normal", "numpy.broadcast_to", "numpy.random.random", "numpy.prod", "numpy.array", "numpy.concatenate", "numpy.linspace" ] ]
pseudowasabi/computer-vision-exercises
[ "34b7c8402c32dbb00e484f90780ebb6546a3f8dc" ]
[ "CV_A4_/A4_compute_descriptors.py" ]
[ "'''\nComputer vision assignment 4 by Yoseob Kim\nA4_compute_descriptors.py\nCompute similarity-reflected image descriptors with L1, L2 norm distances by using SIFT descriptors.\n\n* Status: (working on it)\n* GitHub Link: https://github.com/pseudowasabi/computer-vision-exercises/tree/master/CV_A4_\n'''\n\nimport cv2\nimport numpy as np\nimport math\nimport time\nimport operator\nimport random\n\nimg = cv2.imread('ukbench00000.jpg', cv2.IMREAD_GRAYSCALE)\n\n'''\nmy_min = np.inf\nmy_max = 0'''\nfor i in range(1000):\n offset = '00' if i < 10 else '0' if i < 100 else ''\n offset += str(i)\n #print(offset)\n\n f = open('./sift/sift100'+offset, 'rb')\n\n # reference - https://numpy.org/doc/stable/reference/generated/numpy.frombuffer.html\n sift_des = np.frombuffer(f.read(), dtype=np.uint8)\n #print(sift_des.shape)\n #print(sift_des)\n\n '''\n if sift_des.shape[0] % 128 != 0:\n print('divide error')\n '''\n sift_des_reshaped = np.reshape(sift_des, (sift_des.shape[0] // 128, 128))\n #print(sift_des_reshaped.shape)\n\n '''\n if sift_des_reshaped.shape[0] < my_min:\n my_min = sift_des_reshaped.shape[0]\n if sift_des_reshaped.shape[0] > my_max:\n my_max = sift_des_reshaped.shape[0]'''\n\n f.close()\n\n\n#print(my_min, my_max)\n# N size\n# min = 73, max = 2388\n\n\n\n\n" ]
[ [ "numpy.reshape" ] ]
Advestis/adnmtf
[ "7b36da64669894506071a75d8bd341edb0e75b9f" ]
[ "adnmtf/nmtf_core.py" ]
[ "\"\"\"Non-negative matrix and tensor factorization core functions\n\n\"\"\"\n\n# Author: Paul Fogel\n\n# License: MIT\n# Jan 4, '20\nfrom typing import Tuple\n\nimport numpy as np\nfrom .nmtf_utils import EPSILON, sparse_opt\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n# TODO (pcotte): typing\n# TODO (pcotte): docstrings (with parameters and returns)\n\n\ndef ntf_stack(m, mmis, n_blocks):\n \"\"\"Unfold tensor M\n for future use with NMF\n \"\"\"\n n, p = m.shape\n mmis = mmis.astype(np.int)\n n_mmis = mmis.shape[0]\n n_blocks = int(n_blocks)\n\n mstacked = np.zeros((int(n * p / n_blocks), n_blocks))\n if n_mmis > 0:\n mmis_stacked = np.zeros((int(n * p / n_blocks), n_blocks))\n else:\n mmis_stacked = np.array([])\n\n for i_block in range(0, n_blocks):\n for j in range(0, int(p / n_blocks)):\n i1 = j * n\n i2 = i1 + n\n mstacked[i1:i2, i_block] = m[:, int(i_block * p / n_blocks + j)]\n if n_mmis > 0:\n mmis_stacked[i1:i2, i_block] = mmis[:, int(i_block * p / n_blocks + j)]\n\n return mstacked, mmis_stacked\n\n\ndef ntf_solve(\n m,\n mmis,\n mt0,\n mw0,\n mb0,\n nc,\n tolerance,\n log_iter,\n status0,\n max_iterations,\n nmf_fix_user_lhe,\n nmf_fix_user_rhe,\n nmf_fix_user_bhe,\n nmf_sparse_level,\n ntf_unimodal,\n ntf_smooth,\n ntf_left_components,\n ntf_right_components,\n ntf_block_components,\n n_blocks,\n nmf_priors,\n my_status_box,\n):\n \"\"\"Interface to:\n - NTFSolve_simple\n \"\"\"\n\n if len(nmf_priors) > 0:\n n_nmf_priors, nc = nmf_priors.shape\n else:\n n_nmf_priors = 0\n\n if n_nmf_priors > 0:\n nmf_priors[nmf_priors > 0] = 1\n\n return ntf_solve_simple(\n m=m,\n mmis=mmis,\n mt0=mt0,\n mw0=mw0,\n mb0=mb0,\n nc=nc,\n tolerance=tolerance,\n log_iter=log_iter,\n status0=status0,\n max_iterations=max_iterations,\n nmf_fix_user_lhe=nmf_fix_user_lhe,\n nmf_fix_user_rhe=nmf_fix_user_rhe,\n nmf_fix_user_bhe=nmf_fix_user_bhe,\n nmf_sparse_level=nmf_sparse_level,\n ntf_unimodal=ntf_unimodal,\n ntf_smooth=ntf_smooth,\n ntf_left_components=ntf_left_components,\n ntf_right_components=ntf_right_components,\n ntf_block_components=ntf_block_components,\n n_blocks=n_blocks,\n nmf_priors=nmf_priors,\n my_status_box=my_status_box,\n )\n\n\ndef ntf_solve_simple(\n m,\n mmis,\n mt0,\n mw0,\n mb0,\n nc,\n tolerance,\n log_iter,\n status0,\n max_iterations,\n nmf_fix_user_lhe,\n nmf_fix_user_rhe,\n nmf_fix_user_bhe,\n nmf_sparse_level,\n ntf_unimodal,\n ntf_smooth,\n ntf_left_components,\n ntf_right_components,\n ntf_block_components,\n n_blocks,\n nmf_priors,\n my_status_box,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, float, int]:\n \"\"\"\n Estimate NTF matrices (HALS)\n\n Parameters\n ----------\n m: Input matrix\n mmis: Define missing values (0 = missing cell, 1 = real cell)\n mt0: Initial left hand matrix\n mw0: Initial right hand matrix\n mb0: Initial block hand matrix\n nc: NTF rank\n tolerance: Convergence threshold\n log_iter: Log results through iterations\n status0: Initial displayed status to be updated during iterations\n max_iterations: Max iterations\n nmf_fix_user_lhe: = 1 => fixed left hand matrix columns\n nmf_fix_user_rhe: = 1 => fixed right hand matrix columns\n nmf_fix_user_bhe: = 1 => fixed block hand matrix columns\n nmf_sparse_level: sparsity level (as defined by Hoyer); +/- = make RHE/LHe sparse\n ntf_unimodal: Apply Unimodal constraint on factoring vectors\n ntf_smooth: Apply Smooth constraint on factoring vectors\n ntf_left_components: Apply Unimodal/Smooth constraint on left hand matrix\n ntf_right_components: Apply Unimodal/Smooth 
constraint on right hand matrix\n ntf_block_components: Apply Unimodal/Smooth constraint on block hand matrix\n n_blocks: Number of NTF blocks\n nmf_priors: Elements in mw that should be updated (others remain 0)\n my_status_box\n\n Returns\n -------\n Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, float, int]\\n\n * mt: Left hand matrix\\n\n * mw: Right hand matrix\\n\n * mb: Block hand matrix\\n\n * diff: objective cost\\n\n * cancel_pressed\\n\n\n Reference\n ---------\n a. Cichocki, P.H.a.N. Anh-Huym, Fast local algorithms for large scale nonnegative matrix and tensor factorizations,\n IEICE Trans. Fundam. Electron. Commun. Comput. Sci. 92 (3) (2009) 708–721.\n \"\"\"\n\n cancel_pressed = 0\n\n n, p0 = m.shape\n n_mmis = mmis.shape[0]\n nc = int(nc)\n n_blocks = int(n_blocks)\n p = int(p0 / n_blocks)\n nxp = int(n * p)\n nxp0 = int(n * p0)\n mt = np.copy(mt0)\n mw = np.copy(mw0)\n mb = np.copy(mb0)\n # step_iter = math.ceil(MaxIterations/10)\n step_iter = 1\n pbar_step = 100 * step_iter / max_iterations\n\n id_blockp = np.arange(0, (n_blocks - 1) * p + 1, p)\n a = np.zeros(n)\n b = np.zeros(p)\n c = np.zeros(n_blocks)\n alpha = np.zeros(nc)\n\n # Compute Residual tensor\n mfit = np.zeros((n, p0))\n for k in range(0, nc):\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mfit[:, id_blockp[i_block]: id_blockp[i_block] + p] += (\n mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n )\n else:\n mfit[:, id_blockp[0]: id_blockp[0] + p] += np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n\n denomt = np.zeros(n)\n denomw = np.zeros(p)\n denom_block = np.zeros((n_blocks, nc))\n mt2 = np.zeros(n)\n mw2 = np.zeros(p)\n mt_mw = np.zeros(nxp)\n denom_cutoff = 0.1\n\n if n_mmis > 0:\n mres = (m - mfit) * mmis\n else:\n mres = m - mfit\n\n my_status_box.init_bar()\n\n # Loop\n cont = 1\n i_iter = 0\n diff0 = 1.0e99\n mpart = np.zeros((n, p0))\n if abs(nmf_sparse_level) < 1:\n alpha[0] = nmf_sparse_level * 0.8\n else:\n alpha[0] = nmf_sparse_level\n\n percent_zeros = 0\n iter_sparse = 0\n\n while (cont > 0) & (i_iter < max_iterations):\n for k in range(0, nc):\n (\n n_blocks,\n mpart,\n id_blockp,\n p,\n mb,\n k,\n mt,\n n,\n mw,\n n_mmis,\n mmis,\n mres,\n nmf_fix_user_lhe,\n denomt,\n mw2,\n denom_cutoff,\n alpha,\n ntf_unimodal,\n ntf_left_components,\n ntf_smooth,\n a,\n nmf_fix_user_rhe,\n denomw,\n mt2,\n ntf_right_components,\n b,\n nmf_fix_user_bhe,\n mt_mw,\n nxp,\n denom_block,\n ntf_block_components,\n c,\n mfit,\n nmf_priors,\n ) = ntf_update(\n n_blocks=n_blocks,\n mpart=mpart,\n id_blockp=id_blockp,\n p=p,\n mb=mb,\n k=k,\n mt=mt,\n n=n,\n mw=mw,\n n_mmis=n_mmis,\n mmis=mmis,\n mres=mres,\n nmf_fix_user_lhe=nmf_fix_user_lhe,\n denomt=denomt,\n mw2=mw2,\n denom_cutoff=denom_cutoff,\n alpha=alpha,\n ntf_unimodal=ntf_unimodal,\n ntf_left_components=ntf_left_components,\n ntf_smooth=ntf_smooth,\n a=a,\n nmf_fix_user_rhe=nmf_fix_user_rhe,\n denomw=denomw,\n mt2=mt2,\n ntf_right_components=ntf_right_components,\n b=b,\n nmf_fix_user_bhe=nmf_fix_user_bhe,\n mt_mw=mt_mw,\n nxp=nxp,\n denom_block=denom_block,\n ntf_block_components=ntf_block_components,\n c=c,\n mfit=mfit,\n nmf_priors=nmf_priors,\n )\n\n if i_iter % step_iter == 0:\n # Check convergence\n diff = np.linalg.norm(mres) ** 2 / nxp0\n if (diff0 - diff) / diff0 < tolerance:\n cont = 0\n else:\n if diff > diff0:\n my_status_box.my_print(f\"{status0} Iter: {i_iter} MSR does not improve\")\n\n diff0 = diff\n\n Status = f\"{status0} Iteration: {i_iter}\"\n\n if nmf_sparse_level != 0:\n 
Status = f\"{Status} ; Achieved sparsity: {round(percent_zeros, 2)}; alpha: {round(alpha[0], 2)}\"\n if log_iter == 1:\n my_status_box.my_print(Status)\n\n my_status_box.update_status(status=Status)\n my_status_box.update_bar(step=pbar_step)\n if my_status_box.cancel_pressed:\n cancel_pressed = 1\n return np.array([]), mt, mw, mb, mres, cancel_pressed\n\n if log_iter == 1:\n my_status_box.my_print(status0 + \" Iter: \" + str(i_iter) + \" MSR: \" + str(diff))\n\n i_iter += 1\n\n if cont == 0 or i_iter == max_iterations or (cont == 0 and abs(nmf_sparse_level) == 1):\n if 0 < nmf_sparse_level < 1:\n sparse_test = np.zeros((nc, 1))\n percent_zeros0 = percent_zeros\n for k in range(0, nc):\n sparse_test[k] = np.where(mw[:, k] == 0)[0].size\n\n percent_zeros = np.mean(sparse_test) / p\n if percent_zeros < percent_zeros0:\n iter_sparse += 1\n else:\n iter_sparse = 0\n\n if (percent_zeros < 0.99 * nmf_sparse_level) & (iter_sparse < 50):\n alpha[0] *= min(1.05 * nmf_sparse_level / percent_zeros, 1.1)\n if alpha[0] < 1:\n i_iter = 0\n cont = 1\n\n elif 0 > nmf_sparse_level > -1:\n sparse_test = np.zeros((nc, 1))\n percent_zeros0 = percent_zeros\n for k in range(0, nc):\n sparse_test[k] = np.where(mt[:, k] == 0)[0].size\n\n percent_zeros = np.mean(sparse_test) / n\n if percent_zeros < percent_zeros0:\n iter_sparse += 1\n else:\n iter_sparse = 0\n\n if (percent_zeros < 0.99 * abs(nmf_sparse_level)) & (iter_sparse < 50):\n alpha[0] *= min(1.05 * abs(nmf_sparse_level) / percent_zeros, 1.1)\n if abs(alpha[0]) < 1:\n i_iter = 0\n cont = 1\n\n elif abs(alpha[0]) == 1:\n if alpha[0] == -1:\n for k in range(0, nc):\n if np.max(mt[:, k]) > 0:\n hhi = int(\n np.round(\n (np.linalg.norm(mt[:, k], ord=1) / (np.linalg.norm(mt[:, k], ord=2) + EPSILON))\n ** 2,\n decimals=0,\n )\n )\n alpha[k] = -1 - (n - hhi) / (n - 1)\n else:\n alpha[k] = 0\n else:\n for k in range(0, nc):\n if np.max(mw[:, k]) > 0:\n hhi = int(\n np.round(\n (np.linalg.norm(mw[:, k], ord=1) / (np.linalg.norm(mw[:, k], ord=2) + EPSILON))\n ** 2,\n decimals=0,\n )\n )\n alpha[k] = 1 + (p - hhi) / (p - 1)\n else:\n alpha[k] = 0\n\n if alpha[0] <= -1:\n alpha_real = -(alpha + 1)\n # noinspection PyTypeChecker\n alpha_min = min(alpha_real)\n for k in range(0, nc):\n # noinspection PyUnresolvedReferences\n alpha[k] = min(alpha_real[k], 2 * alpha_min)\n alpha[k] = -alpha[k] - 1\n else:\n alpha_real = alpha - 1\n alpha_min = min(alpha_real)\n for k in range(0, nc):\n alpha[k] = min(alpha_real[k], 2 * alpha_min)\n alpha[k] = alpha[k] + 1\n\n i_iter = 0\n cont = 1\n diff0 = 1.0e99\n\n for k in range(0, nc):\n hhi = np.round((np.linalg.norm(mt[:, k], ord=1) / np.linalg.norm(mt[:, k], ord=2)) ** 2, decimals=0)\n logger.info(f\"component: {k}, left hhi: {hhi}\")\n hhi = np.round((np.linalg.norm(mw[:, k], ord=1) / np.linalg.norm(mw[:, k], ord=2)) ** 2, decimals=0)\n logger.info(f\"component: {k} right hhi: {hhi}\")\n\n if (n_mmis > 0) & (nmf_fix_user_bhe == 0):\n mb *= denom_block\n\n # TODO (pcotte): mt and mw can be not yet referenced: fix that\n return np.array([]), mt, mw, mb, diff, cancel_pressed\n\n\ndef ntf_update(\n n_blocks,\n mpart,\n id_blockp,\n p,\n mb,\n k,\n mt,\n n,\n mw,\n n_mmis,\n mmis,\n mres,\n nmf_fix_user_lhe,\n denomt,\n mw2,\n denom_cutoff,\n alpha,\n ntf_unimodal,\n ntf_left_components,\n ntf_smooth,\n a,\n nmf_fix_user_rhe,\n denomw,\n mt2,\n ntf_right_components,\n b,\n nmf_fix_user_bhe,\n mt_mw,\n nxp,\n denom_block,\n ntf_block_components,\n c,\n mfit,\n nmf_priors,\n):\n \"\"\"Core updating code called by NTFSolve_simple & NTF 
Solve_conv\n Input:\n All variables in the calling function used in the function\n Output:\n Same as Input\n \"\"\"\n\n if len(nmf_priors) > 0:\n n_nmf_priors, nc = nmf_priors.shape\n else:\n n_nmf_priors = 0\n\n # Compute kth-part\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mpart[:, id_blockp[i_block]: id_blockp[i_block] + p] = (\n mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n )\n else:\n mpart[:, id_blockp[0]: id_blockp[0] + p] = np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n\n if n_mmis > 0:\n mpart *= mmis\n\n mpart += mres\n\n if nmf_fix_user_bhe > 0:\n norm_bhe = True\n if nmf_fix_user_rhe == 0:\n norm_lhe = True\n norm_rhe = False\n else:\n norm_lhe = False\n norm_rhe = True\n else:\n norm_bhe = False\n norm_lhe = True\n norm_rhe = True\n\n if (nmf_fix_user_lhe > 0) & norm_lhe:\n norm = np.linalg.norm(mt[:, k])\n if norm > 0:\n mt[:, k] /= norm\n\n if (nmf_fix_user_rhe > 0) & norm_rhe:\n norm = np.linalg.norm(mw[:, k])\n if norm > 0:\n mw[:, k] /= norm\n\n if (nmf_fix_user_bhe > 0) & norm_bhe & (n_blocks > 1):\n norm = np.linalg.norm(mb[:, k])\n if norm > 0:\n mb[:, k] /= norm\n\n if nmf_fix_user_lhe == 0:\n # Update Mt\n mt[:, k] = 0\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mt[:, k] += mb[i_block, k] * mpart[:, id_blockp[i_block]: id_blockp[i_block] + p] @ mw[:, k]\n else:\n mt[:, k] += mpart[:, id_blockp[0]: id_blockp[0] + p] @ mw[:, k]\n\n if n_mmis > 0:\n denomt[:] = 0\n mw2[:] = mw[:, k] ** 2\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n # Broadcast missing cells into Mw to calculate Mw.T * Mw\n denomt += mb[i_block, k] ** 2 * mmis[:, id_blockp[i_block]: id_blockp[i_block] + p] @ mw2\n else:\n denomt += mmis[:, id_blockp[0]: id_blockp[0] + p] @ mw2\n\n denomt /= np.max(denomt)\n denomt[denomt < denom_cutoff] = denom_cutoff\n mt[:, k] /= denomt\n\n mt[mt[:, k] < 0, k] = 0\n if alpha[0] < 0:\n if alpha[0] <= -1:\n if (alpha[0] == -1) & (np.max(mt[:, k]) > 0):\n t_threshold = mt[:, k]\n hhi = int(\n np.round(\n (np.linalg.norm(t_threshold, ord=1) / (np.linalg.norm(t_threshold, ord=2) + EPSILON)) ** 2,\n decimals=0,\n )\n )\n t_rank = np.argsort(t_threshold)\n t_threshold[t_rank[0: n - hhi]] = 0\n else:\n mt[:, k] = sparse_opt(mt[:, k], -alpha[k] - 1, False)\n else:\n mt[:, k] = sparse_opt(mt[:, k], -alpha[0], False)\n\n if (ntf_unimodal > 0) & (ntf_left_components > 0):\n # Enforce unimodal distribution\n tmax = np.argmax(mt[:, k])\n for i in range(tmax + 1, n):\n mt[i, k] = min(mt[i - 1, k], mt[i, k])\n\n for i in range(tmax - 1, -1, -1):\n mt[i, k] = min(mt[i + 1, k], mt[i, k])\n\n if (ntf_smooth > 0) & (ntf_left_components > 0):\n # Smooth distribution\n a[0] = 0.75 * mt[0, k] + 0.25 * mt[1, k]\n a[n - 1] = 0.25 * mt[n - 2, k] + 0.75 * mt[n - 1, k]\n for i in range(1, n - 1):\n a[i] = 0.25 * mt[i - 1, k] + 0.5 * mt[i, k] + 0.25 * mt[i + 1, k]\n\n mt[:, k] = a\n\n if norm_lhe:\n norm = np.linalg.norm(mt[:, k])\n if norm > 0:\n mt[:, k] /= norm\n\n if nmf_fix_user_rhe == 0:\n # Update Mw\n mw[:, k] = 0\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mw[:, k] += mpart[:, id_blockp[i_block]: id_blockp[i_block] + p].T @ mt[:, k] * mb[i_block, k]\n else:\n mw[:, k] += mpart[:, id_blockp[0]: id_blockp[0] + p].T @ mt[:, k]\n\n if n_mmis > 0:\n denomw[:] = 0\n mt2[:] = mt[:, k] ** 2\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n # Broadcast missing cells into Mw to calculate Mt.T * Mt\n denomw += mb[i_block, k] ** 2 * mmis[:, id_blockp[i_block]: id_blockp[i_block] + p].T @ 
mt2\n else:\n denomw += mmis[:, id_blockp[0]: id_blockp[0] + p].T @ mt2\n\n denomw /= np.max(denomw)\n denomw[denomw < denom_cutoff] = denom_cutoff\n mw[:, k] /= denomw\n\n mw[mw[:, k] < 0, k] = 0\n\n if alpha[0] > 0:\n if alpha[0] >= 1:\n if (alpha[0] == 1) & (np.max(mw[:, k]) > 0):\n w_threshold = mw[:, k]\n hhi = int(\n np.round(\n (np.linalg.norm(w_threshold, ord=1) / (np.linalg.norm(w_threshold, ord=2) + EPSILON)) ** 2,\n decimals=0,\n )\n )\n w_rank = np.argsort(w_threshold)\n w_threshold[w_rank[0: p - hhi]] = 0\n else:\n mw[:, k] = sparse_opt(mw[:, k], alpha[k] - 1, False)\n else:\n mw[:, k] = sparse_opt(mw[:, k], alpha[0], False)\n\n if (ntf_unimodal > 0) & (ntf_right_components > 0):\n # Enforce unimodal distribution\n wmax = np.argmax(mw[:, k])\n for j in range(wmax + 1, p):\n mw[j, k] = min(mw[j - 1, k], mw[j, k])\n\n for j in range(wmax - 1, -1, -1):\n mw[j, k] = min(mw[j + 1, k], mw[j, k])\n\n if (ntf_smooth > 0) & (ntf_right_components > 0):\n # Smooth distribution\n b[0] = 0.75 * mw[0, k] + 0.25 * mw[1, k]\n b[p - 1] = 0.25 * mw[p - 2, k] + 0.75 * mw[p - 1, k]\n for j in range(1, p - 1):\n b[j] = 0.25 * mw[j - 1, k] + 0.5 * mw[j, k] + 0.25 * mw[j + 1, k]\n\n mw[:, k] = b\n\n if n_nmf_priors > 0:\n mw[:, k] = mw[:, k] * nmf_priors[:, k]\n\n if norm_rhe:\n norm = np.linalg.norm(mw[:, k])\n if norm > 0:\n mw[:, k] /= norm\n\n if nmf_fix_user_bhe == 0:\n # Update Mb\n mb[:, k] = 0\n mt_mw[:] = np.reshape((np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))), nxp)\n\n for i_block in range(0, n_blocks):\n mb[i_block, k] = np.reshape(mpart[:, id_blockp[i_block]: id_blockp[i_block] + p], nxp).T @ mt_mw\n\n if n_mmis > 0:\n mt_mw[:] = mt_mw[:] ** 2\n for i_block in range(0, n_blocks):\n # Broadcast missing cells into Mb to calculate Mb.T * Mb\n denom_block[i_block, k] = (\n np.reshape(mmis[:, id_blockp[i_block]: id_blockp[i_block] + p], (1, nxp)) @ mt_mw\n )\n\n maxdenom_block = np.max(denom_block[:, k])\n denom_block[denom_block[:, k] < denom_cutoff * maxdenom_block] = denom_cutoff * maxdenom_block\n mb[:, k] /= denom_block[:, k]\n\n mb[mb[:, k] < 0, k] = 0\n\n if (ntf_unimodal > 0) & (ntf_block_components > 0):\n # Enforce unimodal distribution\n bmax = np.argmax(mb[:, k])\n for i_block in range(bmax + 1, n_blocks):\n mb[i_block, k] = min(mb[i_block - 1, k], mb[i_block, k])\n\n for i_block in range(bmax - 1, -1, -1):\n mb[i_block, k] = min(mb[i_block + 1, k], mb[i_block, k])\n\n if (ntf_smooth > 0) & (ntf_block_components > 0):\n # Smooth distribution\n c[0] = 0.75 * mb[0, k] + 0.25 * mb[1, k]\n c[n_blocks - 1] = 0.25 * mb[n_blocks - 2, k] + 0.75 * mb[n_blocks - 1, k]\n for i_block in range(1, n_blocks - 1):\n c[i_block] = 0.25 * mb[i_block - 1, k] + 0.5 * mb[i_block, k] + 0.25 * mb[i_block + 1, k]\n\n mb[:, k] = c\n\n if norm_bhe:\n norm = np.linalg.norm(mb[:, k])\n if norm > 0:\n mb[:, k] /= norm\n\n # Update residual tensor\n mfit[:, :] = 0\n if n_blocks > 1:\n for i_block in range(0, n_blocks):\n mfit[:, id_blockp[i_block]: id_blockp[i_block] + p] += (\n mb[i_block, k] * np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n )\n else:\n mfit[:, id_blockp[0]: id_blockp[0] + p] += np.reshape(mt[:, k], (n, 1)) @ np.reshape(mw[:, k], (1, p))\n\n if n_mmis > 0:\n mres[:, :] = (mpart - mfit) * mmis\n else:\n mres[:, :] = mpart - mfit\n\n return (\n n_blocks,\n mpart,\n id_blockp,\n p,\n mb,\n k,\n mt,\n n,\n mw,\n n_mmis,\n mmis,\n mres,\n nmf_fix_user_lhe,\n denomt,\n mw2,\n denom_cutoff,\n alpha,\n ntf_unimodal,\n ntf_left_components,\n ntf_smooth,\n a,\n 
nmf_fix_user_rhe,\n denomw,\n mt2,\n ntf_right_components,\n b,\n nmf_fix_user_bhe,\n mt_mw,\n nxp,\n denom_block,\n ntf_block_components,\n c,\n mfit,\n nmf_priors,\n )\n" ]
[ [ "numpy.zeros", "numpy.reshape", "numpy.argsort", "numpy.copy", "numpy.argmax", "numpy.arange", "numpy.max", "numpy.array", "numpy.where", "numpy.linalg.norm", "numpy.mean" ] ]
wwt17/texar-pytorch
[ "9fb3ae8f7b541da5c808357033a93fba1817bfbd" ]
[ "texar/torch/modules/decoders/rnn_decoders_test.py" ]
[ "\"\"\"\nUnit tests for RNN decoders.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom texar.torch.hyperparams import HParams\nfrom texar.torch.modules.decoders.decoder_helpers import get_helper\nfrom texar.torch.modules.decoders.rnn_decoders import (\n AttentionRNNDecoder, AttentionRNNDecoderOutput, BasicRNNDecoder,\n BasicRNNDecoderOutput)\nfrom texar.torch.modules.embedders.embedders import WordEmbedder\nfrom texar.torch.utils.utils import map_structure\n\n\nclass BasicRNNDecoderTest(unittest.TestCase):\n r\"\"\"Tests :class:`~texar.torch.modules.decoders.rnn_decoders.BasicRNNDecoder`.\n \"\"\"\n\n def setUp(self):\n self._vocab_size = 4\n self._max_time = 8\n self._batch_size = 16\n self._emb_dim = 20\n self._inputs = torch.randint(\n self._vocab_size, size=(self._batch_size, self._max_time))\n embedding = torch.rand(\n self._vocab_size, self._emb_dim, dtype=torch.float)\n self._embedder = WordEmbedder(init_value=embedding)\n self._hparams = HParams(None, BasicRNNDecoder.default_hparams())\n\n def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,\n test_mode=False):\n hidden_size = decoder.hparams.rnn_cell.kwargs.num_units\n\n self.assertIsInstance(outputs, BasicRNNDecoderOutput)\n max_time = (self._max_time if not test_mode\n else max(sequence_lengths).item())\n self.assertEqual(\n outputs.logits.shape,\n (self._batch_size, max_time, self._vocab_size))\n if not test_mode:\n np.testing.assert_array_equal(\n sequence_lengths, [max_time] * self._batch_size)\n self.assertEqual(final_state[0].shape, (self._batch_size, hidden_size))\n\n def test_decode_train(self):\n r\"\"\"Tests decoding in training mode.\n \"\"\"\n decoder = BasicRNNDecoder(\n token_embedder=self._embedder, input_size=self._emb_dim,\n vocab_size=self._vocab_size, hparams=self._hparams)\n sequence_length = torch.tensor([self._max_time] * self._batch_size)\n\n # Helper by default HParams\n helper_train = decoder.create_helper()\n outputs, final_state, sequence_lengths = decoder(\n helper=helper_train, inputs=self._inputs,\n sequence_length=sequence_length)\n self._test_outputs(decoder, outputs, final_state, sequence_lengths)\n\n # Helper by decoding strategy\n helper_train = decoder.create_helper(decoding_strategy='train_greedy')\n outputs, final_state, sequence_lengths = decoder(\n helper=helper_train, inputs=self._inputs,\n sequence_length=sequence_length)\n self._test_outputs(decoder, outputs, final_state, sequence_lengths)\n\n # Implicit helper\n outputs, final_state, sequence_lengths = decoder(\n inputs=self._inputs, sequence_length=sequence_length)\n self._test_outputs(decoder, outputs, final_state, sequence_lengths)\n\n # Eval helper through forward args\n outputs, final_state, sequence_lengths = decoder(\n embedding=self._embedder,\n start_tokens=torch.tensor([1] * self._batch_size),\n end_token=2, infer_mode=True)\n self._test_outputs(\n decoder, outputs, final_state, sequence_lengths, test_mode=True)\n\n @staticmethod\n def _assert_tensor_equal(a: torch.Tensor, b: torch.Tensor) -> bool:\n if torch.is_tensor(a):\n a = a.detach().numpy()\n if torch.is_tensor(b):\n b = b.detach().numpy()\n if any(np.issubdtype(array.dtype, np.floating) for array in [a, b]):\n return np.testing.assert_allclose(a, b, rtol=1e-5, atol=1e-8)\n return np.testing.assert_array_equal(a, b)\n\n def test_decode_train_with_torch(self):\n r\"\"\"Compares decoding results with PyTorch built-in decoder.\n \"\"\"\n decoder = BasicRNNDecoder(\n 
token_embedder=self._embedder, input_size=self._emb_dim,\n vocab_size=self._vocab_size, hparams=self._hparams)\n\n input_size = self._emb_dim\n hidden_size = decoder.hparams.rnn_cell.kwargs.num_units\n num_layers = decoder.hparams.rnn_cell.num_layers\n torch_lstm = nn.LSTM(input_size, hidden_size, num_layers,\n batch_first=True)\n\n # match parameters\n for name in ['weight_ih', 'weight_hh', 'bias_ih', 'bias_hh']:\n setattr(torch_lstm, f'{name}_l0',\n getattr(decoder._cell._cell, name))\n torch_lstm.flatten_parameters()\n\n output_layer = decoder._output_layer\n input_lengths = torch.tensor([self._max_time] * self._batch_size)\n inputs = torch.randint(\n self._vocab_size, size=(self._batch_size, self._max_time))\n\n # decoder outputs\n helper_train = decoder.create_helper()\n outputs, final_state, sequence_lengths = decoder(\n inputs=inputs,\n sequence_length=input_lengths,\n helper=helper_train)\n\n # torch LSTM outputs\n lstm_inputs = F.embedding(inputs, self._embedder.embedding)\n torch_outputs, torch_states = torch_lstm(lstm_inputs)\n torch_outputs = output_layer(torch_outputs)\n torch_sample_id = torch.argmax(torch_outputs, dim=-1)\n\n self.assertEqual(final_state[0].shape,\n (self._batch_size, hidden_size))\n\n self._assert_tensor_equal(outputs.logits, torch_outputs)\n self._assert_tensor_equal(outputs.sample_id, torch_sample_id)\n self._assert_tensor_equal(final_state[0], torch_states[0].squeeze(0))\n self._assert_tensor_equal(final_state[1], torch_states[1].squeeze(0))\n self._assert_tensor_equal(sequence_lengths, input_lengths)\n\n def test_decode_infer(self):\n r\"\"\"Tests decoding in inference mode.\"\"\"\n decoder = BasicRNNDecoder(\n token_embedder=self._embedder, input_size=self._emb_dim,\n vocab_size=self._vocab_size, hparams=self._hparams)\n\n decoder.eval()\n start_tokens = torch.tensor([self._vocab_size - 2] * self._batch_size)\n\n helpers = []\n for strategy in ['infer_greedy', 'infer_sample']:\n helper = decoder.create_helper(\n decoding_strategy=strategy,\n start_tokens=start_tokens,\n end_token=self._vocab_size - 1)\n helpers.append(helper)\n for klass in ['TopKSampleEmbeddingHelper', 'SoftmaxEmbeddingHelper',\n 'GumbelSoftmaxEmbeddingHelper']:\n helper = get_helper(\n klass, start_tokens=start_tokens,\n end_token=self._vocab_size - 1,\n top_k=self._vocab_size // 2, tau=2.0,\n straight_through=True)\n helpers.append(helper)\n\n for helper in helpers:\n max_length = 100\n outputs, final_state, sequence_lengths = decoder(\n helper=helper, max_decoding_length=max_length)\n self.assertLessEqual(max(sequence_lengths), max_length)\n self._test_outputs(decoder, outputs, final_state, sequence_lengths,\n test_mode=True)\n\n\nclass AttentionRNNDecoderTest(unittest.TestCase):\n r\"\"\"Tests :class:`~texar.torch.modules.decoders.rnn_decoders.AttentionRNNDecoder`.\n \"\"\"\n\n def setUp(self):\n self._vocab_size = 10\n self._max_time = 16\n self._batch_size = 8\n self._emb_dim = 20\n self._attention_dim = 256\n self._inputs = torch.randint(\n self._vocab_size, size=(self._batch_size, self._max_time))\n embedding = torch.rand(\n self._vocab_size, self._emb_dim, dtype=torch.float)\n self._embedder = WordEmbedder(init_value=embedding)\n self._encoder_output = torch.rand(\n self._batch_size, self._max_time, 64)\n\n self._test_hparams = {} # (cell_type, is_multi) -> hparams\n for cell_type in [\"RNNCell\", \"LSTMCell\", \"GRUCell\"]:\n hparams = {\n \"rnn_cell\": {\n 'type': cell_type,\n 'kwargs': {\n 'num_units': 256,\n },\n },\n \"attention\": {\n \"kwargs\": {\n \"num_units\": 
self._attention_dim\n },\n }\n }\n self._test_hparams[(cell_type, False)] = HParams(\n hparams, AttentionRNNDecoder.default_hparams())\n\n hparams = {\n \"rnn_cell\": {\n 'type': 'LSTMCell',\n 'kwargs': {\n 'num_units': 256,\n },\n 'num_layers': 3,\n },\n \"attention\": {\n \"kwargs\": {\n \"num_units\": self._attention_dim\n },\n }\n }\n self._test_hparams[(\"LSTMCell\", True)] = HParams(\n hparams, AttentionRNNDecoder.default_hparams())\n\n def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,\n test_mode=False):\n hidden_size = decoder.hparams.rnn_cell.kwargs.num_units\n cell_type = decoder.hparams.rnn_cell.type\n is_multi = decoder.hparams.rnn_cell.num_layers > 1\n\n self.assertIsInstance(outputs, AttentionRNNDecoderOutput)\n max_time = (self._max_time if not test_mode\n else max(sequence_lengths).item())\n self.assertEqual(\n outputs.logits.shape,\n (self._batch_size, max_time, self._vocab_size))\n if not test_mode:\n np.testing.assert_array_equal(\n sequence_lengths, [max_time] * self._batch_size)\n\n map_structure(\n lambda t: self.assertEqual(\n t.size(), (self._batch_size, hidden_size)),\n final_state.cell_state)\n state = final_state.cell_state\n if is_multi:\n self.assertIsInstance(state, list)\n state = state[0]\n if cell_type == \"LSTMCell\":\n self.assertIsInstance(state, tuple)\n state = state[0]\n self.assertIsInstance(state, torch.Tensor)\n\n def test_decode_infer(self):\n r\"\"\"Tests decoding in inference mode.\n \"\"\"\n seq_length = np.random.randint(\n self._max_time, size=[self._batch_size]) + 1\n encoder_values_length = torch.tensor(seq_length)\n\n for (cell_type, is_multi), hparams in self._test_hparams.items():\n decoder = AttentionRNNDecoder(\n encoder_output_size=64,\n token_embedder=self._embedder,\n vocab_size=self._vocab_size,\n input_size=self._emb_dim,\n hparams=hparams)\n\n decoder.eval()\n\n helper_infer = decoder.create_helper(\n start_tokens=torch.tensor([1] * self._batch_size), end_token=2)\n\n outputs, final_state, sequence_lengths = decoder(\n memory=self._encoder_output,\n memory_sequence_length=encoder_values_length,\n helper=helper_infer)\n\n self._test_outputs(decoder, outputs, final_state, sequence_lengths,\n test_mode=True)\n\n def test_decode_train(self):\n r\"\"\"Tests decoding in training mode.\n \"\"\"\n seq_length = np.random.randint(\n self._max_time, size=[self._batch_size]) + 1\n encoder_values_length = torch.tensor(seq_length)\n\n for (cell_type, is_multi), hparams in self._test_hparams.items():\n decoder = AttentionRNNDecoder(\n encoder_output_size=64,\n token_embedder=self._embedder,\n vocab_size=self._vocab_size,\n input_size=self._emb_dim,\n hparams=hparams)\n\n sequence_length = torch.tensor([self._max_time] * self._batch_size)\n\n helper_train = decoder.create_helper()\n outputs, final_state, sequence_lengths = decoder(\n memory=self._encoder_output,\n memory_sequence_length=encoder_values_length,\n helper=helper_train,\n inputs=self._inputs,\n sequence_length=sequence_length)\n\n self._test_outputs(decoder, outputs, final_state, sequence_lengths)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.nn.LSTM", "torch.randint", "torch.nn.functional.embedding", "torch.argmax", "torch.rand", "numpy.issubdtype", "torch.tensor", "numpy.testing.assert_array_equal", "torch.is_tensor", "numpy.testing.assert_allclose", "numpy.random.randint" ] ]
artemkurylev/Context-Aware_Crowd_Counting-pytorch
[ "d68ddd87b99f2afc512357cb8fcb0ca41ea22865" ]
[ "train.py" ]
[ "import numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nimport os\nimport visdom\nimport random\nfrom tqdm import tqdm as tqdm\n\nfrom cannet import CANNet\nfrom my_dataset import CrowdDataset\n\nif __name__==\"__main__\":\n # configuration\n train_image_root='./data/Shanghai_part_A/train_data/images'\n train_dmap_root='./data/Shanghai_part_A/train_data/ground_truth'\n test_image_root='./data/Shanghai_part_A/test_data/images'\n test_dmap_root='./data/Shanghai_part_A/test_data/ground_truth'\n gpu_or_cpu='cuda' # use cuda or cpu\n lr = 1e-7\n batch_size = 1\n momentum = 0.95\n epochs = 20000\n steps = [-1,1,100,150]\n scales = [1,1,1,1]\n workers = 4\n seed = time.time()\n print_freq = 30 \n \n vis=visdom.Visdom()\n device=torch.device(gpu_or_cpu)\n torch.cuda.manual_seed(seed)\n model=CANNet().to(device)\n criterion=nn.MSELoss(size_average=False).to(device)\n optimizer=torch.optim.SGD(model.parameters(),lr,\n momentum=momentum,\n weight_decay=0)\n# optimizer=torch.optim.Adam(model.parameters(),lr)\n train_dataset=CrowdDataset(train_image_root,train_dmap_root,gt_downsample=8,phase='train')\n train_loader=torch.utils.data.DataLoader(train_dataset,batch_size=1,shuffle=True)\n test_dataset=CrowdDataset(test_image_root,test_dmap_root,gt_downsample=8,phase='test')\n test_loader=torch.utils.data.DataLoader(test_dataset,batch_size=1,shuffle=False)\n \n if not os.path.exists('./checkpoints'):\n os.mkdir('./checkpoints')\n min_mae=10000\n min_epoch=0\n train_loss_list=[]\n epoch_list=[]\n test_error_list=[]\n for epoch in range(0,epochs):\n # training phase\n model.train()\n epoch_loss=0\n for i,(img,gt_dmap) in enumerate(tqdm(train_loader)):\n img=img.to(device)\n gt_dmap=gt_dmap.to(device)\n # forward propagation\n et_dmap=model(img)\n # calculate loss\n loss=criterion(et_dmap,gt_dmap)\n epoch_loss+=loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n# print(\"epoch:\",epoch,\"loss:\",epoch_loss/len(dataloader))\n epoch_list.append(epoch)\n train_loss_list.append(epoch_loss/len(train_loader))\n torch.save(model.state_dict(),'./checkpoints/epoch_'+str(epoch)+\".pth\")\n \n # testing phase\n model.eval()\n mae=0\n for i,(img,gt_dmap) in enumerate(tqdm(test_loader)):\n img=img.to(device)\n gt_dmap=gt_dmap.to(device)\n # forward propagation\n et_dmap=model(img)\n mae+=abs(et_dmap.data.sum()-gt_dmap.data.sum()).item()\n del img,gt_dmap,et_dmap\n if mae/len(test_loader)<min_mae:\n min_mae=mae/len(test_loader)\n min_epoch=epoch\n test_error_list.append(mae/len(test_loader))\n print(\"epoch:\"+str(epoch)+\" error:\"+str(mae/len(test_loader))+\" min_mae:\"+str(min_mae)+\" min_epoch:\"+str(min_epoch))\n vis.line(win=1,X=epoch_list, Y=train_loss_list, opts=dict(title='train_loss'))\n vis.line(win=2,X=epoch_list, Y=test_error_list, opts=dict(title='test_error'))\n # show an image\n index=random.randint(0,len(test_loader)-1)\n img,gt_dmap=test_dataset[index]\n vis.image(win=3,img=img,opts=dict(title='img'))\n vis.image(win=4,img=gt_dmap/(gt_dmap.max())*255,opts=dict(title='gt_dmap('+str(gt_dmap.sum())+')'))\n img=img.unsqueeze(0).to(device)\n gt_dmap=gt_dmap.unsqueeze(0)\n et_dmap=model(img)\n et_dmap=et_dmap.squeeze(0).detach().cpu().numpy()\n vis.image(win=5,img=et_dmap/(et_dmap.max())*255,opts=dict(title='et_dmap('+str(et_dmap.sum())+')'))\n \n import time\n print(time.strftime('%Y.%m.%d %H:%M:%S',time.localtime(time.time())))\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n " ]
[ [ "torch.cuda.manual_seed", "torch.utils.data.DataLoader", "torch.device", "torch.nn.MSELoss" ] ]
subhadip7879/neural-net
[ "04aacf7cdec89ea0f58f2c7397c72adefa8c2d4e" ]
[ "image-classifier/image-classifier.py" ]
[ "from IPython.display import Image\n\n\nImage('images/02_network_flowchart.png')\nImage('images/02_convolution.png')\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport time\nfrom datetime import timedelta\nimport math\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ntf.__version__\n#Convolutional Layer 1.\n# will connect each neuron to only a local region of the input volume \n# Convolution filters are 5 x 5 pixels.\nfilter_size1 = 5 \nnum_filters1 = 16 \n\n\n# Convolutional Layer 2.\nfilter_size2 = 5 \nnum_filters2 = 36 \n\n# Fully-connected layer.\nfc_size = 128 \n\n\ndata = input_data.read_data_sets('data/MNIST/', one_hot=True)\n\nprint(\"Size of:\")\nprint(\"- Training-set:\\t\\t{}\".format(len(data.train.labels)))\nprint(\"- Test-set:\\t\\t{}\".format(len(data.test.labels)))\nprint(\"- Validation-set:\\t{}\".format(len(data.validation.labels)))\n\ndata.test.cls = np.argmax(data.test.labels, axis=1)\n\nimg_size = 28\n# Images are stored in 1d array of this length.\nimg_size_flat = img_size * img_size\nimg_shape = (img_size, img_size)\nnum_channels = 1\nnum_classes = 10\n\ndef plot_images(images, cls_true, cls_pred=None):\n assert len(images) == 9 \n len(cls_true) == 9\n fig, axes = plt.subplots(3, 3)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n for i, ax in enumerate(axes.flat):\n # Plot image.\n ax.imshow(images[i].reshape(img_shape), cmap='binary')\n \n if cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(cls_true[i], cls_pred[i])\n \n ax.set_xlabel(xlabel)\n \n ax.set_xticks([])\n ax.set_yticks([])\n plt.show()\n \n\n# first images from mnist\nimages = data.test.images[0:9]\ncls_true = data.test.cls[0:9]\n# Plot the images and labels\nplot_images(images=images, cls_true=cls_true)\n\ndef new_weights(shape):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.05))\n\ndef new_biases(length):\n return tf.Variable(tf.constant(0.05, shape=[length]))\n\ndef new_conv_layer(input,num_input_channels, filter_size,num_filters,use_pooling=True):\n \n shape = [filter_size, filter_size, num_input_channels, num_filters]\n weights = new_weights(shape=shape)\n biases = new_biases(length=num_filters)\n layer = tf.nn.conv2d(input=input,\n filter=weights,\n strides=[1, 1, 1, 1],\n padding='SAME')\n\n \n layer += biases\n \n if use_pooling:\n layer = tf.nn.max_pool(value=layer,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n layer = tf.nn.relu(layer)\n return layer, weights\n\ndef flatten_layer(layer):\n layer_shape = layer.get_shape()\n num_features = layer_shape[1:4].num_elements()\n layer_flat = tf.reshape(layer, [-1, num_features])\n return layer_flat, num_features\n\n\ndef new_fc_layer(input, num_inputs,num_outputs,use_relu=True):\n weights = new_weights(shape=[num_inputs, num_outputs])\n biases = new_biases(length=num_outputs)\n layer = tf.matmul(input, weights) + biases\n \n if use_relu:\n layer = tf.nn.relu(layer)\n\n return layer\n\nx = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')\nx_image = tf.reshape(x, [-1, img_size, img_size, num_channels])\ny_true = tf.placeholder(tf.float32, shape=[None, 10], name='y_true')\ny_true_cls = tf.argmax(y_true, dimension=1)\n\n\nlayer_conv1, weights_conv1 = \\\n new_conv_layer(input=x_image,\n num_input_channels=num_channels,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=True)\nlayer_conv1\n\nlayer_conv2, weights_conv2 = \\\n 
new_conv_layer(input=layer_conv1,\n num_input_channels=num_filters1,\n filter_size=filter_size2,\n num_filters=num_filters2,\n use_pooling=True)\nlayer_conv2\n\nlayer_flat, num_features = flatten_layer(layer_conv2)\nlayer_flat\n\nnum_features\n\n\nlayer_fc1 = new_fc_layer(input=layer_flat,\n num_inputs=num_features,\n num_outputs=fc_size,\n use_relu=True)\nlayer_fc1\n\nlayer_fc2 = new_fc_layer(input=layer_fc1,\n num_inputs=fc_size,\n num_outputs=num_classes,\n use_relu=False)\nlayer_fc2\n\ny_pred = tf.nn.softmax(layer_fc2)\ny_pred_cls = tf.argmax(y_pred, dimension=1)\n\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,\n labels=y_true)\ncost = tf.reduce_mean(cross_entropy)\n\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)\ncorrect_prediction = tf.equal(y_pred_cls, y_true_cls)\n\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\ntrain_batch_size = 64\ntotal_iterations = 0\n\ndef optimize(num_iterations):\n global total_iterations\n start_time = time.time()\n for i in range(total_iterations, total_iterations + num_iterations):\n x_batch, y_true_batch = data.train.next_batch(train_batch_size)\n feed_dict_train = {x: x_batch, y_true: y_true_batch}\n session.run(optimizer, feed_dict=feed_dict_train)\n\n if i % 100 == 0:\n acc = session.run(accuracy, feed_dict=feed_dict_train)\n msg = \"Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}\"\n print(msg.format(i + 1, acc))\n \n total_iterations += num_iterations\n end_time = time.time()\n time_dif = end_time - start_time\n print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif)))))\n \ndef plot_example_errors(cls_pred, correct):\n incorrect = (correct == False)\n images = data.test.images[incorrect]\n cls_pred = cls_pred[incorrect]\n cls_true = data.test.cls[incorrect]\n plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9])\n \ndef plot_confusion_matrix(cls_pred):\n cls_true = data.test.cls\n cm = confusion_matrix(y_true=cls_true, y_pred=cls_pred)\n print(cm)\n plt.matshow(cm)\n\n \n plt.colorbar()\n tick_marks = np.arange(num_classes)\n plt.xticks(tick_marks, range(num_classes))\n plt.yticks(tick_marks, range(num_classes))\n plt.xlabel('Predicted')\n plt.ylabel('True')\n plt.show()\n\ntest_batch_size = 256\ndef print_test_accuracy(show_example_errors=False, show_confusion_matrix=False):\n num_test = len(data.test.images)\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n \n i = 0\n\n while i < num_test:\n j = min(i + test_batch_size, num_test)\n images = data.test.images[i:j, :]\n labels = data.test.labels[i:j, :]\n feed_dict = {x: images, y_true: labels}\n cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)\n i = j\n \n cls_true = data.test.cls\n correct = (cls_true == cls_pred)\n correct_sum = correct.sum()\n acc = float(correct_sum) / num_test\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\n print(msg.format(acc, correct_sum, num_test))\n if show_example_errors:\n print(\"Example errors:\")\n plot_example_errors(cls_pred=cls_pred, correct=correct)\n \n if show_confusion_matrix:\n print(\"Confusion Matrix:\")\n plot_confusion_matrix(cls_pred=cls_pred)\n \nprint_test_accuracy()\n\noptimize(num_iterations=1)\nprint_test_accuracy()\n\noptimize(num_iterations=99) 
\nprint_test_accuracy(show_example_errors=True)\n\noptimize(num_iterations=900)\nprint_test_accuracy(show_example_errors=True)\n\noptimize(num_iterations=9000)\nprint_test_accuracy(show_example_errors=True, show_confusion_matrix=True)\n\ndef plot_conv_weights(weights, input_channel=0):\n w = session.run(weights)\n w_min = np.min(w)\n w_max = np.max(w)\n num_filters = w.shape[3]\n num_grids = math.ceil(math.sqrt(num_filters))\n fig, axes = plt.subplots(num_grids, num_grids)\n for i, ax in enumerate(axes.flat):\n if i<num_filters:\n img = w[:, :, input_channel, i]\n ax.imshow(img, vmin=w_min, vmax=w_max,\n interpolation='nearest', cmap='seismic')\n \n ax.set_xticks([])\n ax.set_yticks([])\n plt.show()\n\ndef plot_conv_layer(layer, image):\n feed_dict = {x: [image]}\n values = session.run(layer, feed_dict=feed_dict)\n num_filters = values.shape[3]\n num_grids = math.ceil(math.sqrt(num_filters))\n fig, axes = plt.subplots(num_grids, num_grids)\n for i, ax in enumerate(axes.flat):\n if i<num_filters:\n img = values[0, :, :, i]\n\n ax.imshow(img, interpolation='nearest', cmap='binary')\n \n ax.set_xticks([])\n ax.set_yticks([])\n \n plt.show()\n \ndef plot_image(image):\n plt.imshow(image.reshape(img_shape),\n interpolation='nearest',\n cmap='binary')\n\n plt.show()\n \n\nimage1 = data.test.images[0]\nplot_image(image1)\n\nimage2 = data.test.images[13]\nplot_image(image2)\n\nplot_conv_weights(weights=weights_conv1)\nplot_conv_layer(layer=layer_conv1, image=image1)\nplot_conv_layer(layer=layer_conv1, image=image2)\nplot_conv_weights(weights=weights_conv2, input_channel=0)\nplot_conv_weights(weights=weights_conv2, input_channel=1)\nplot_conv_layer(layer=layer_conv2, image=image1)\nplot_conv_layer(layer=layer_conv2, image=image2)\n\n\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.reshape", "tensorflow.matmul", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.matshow", "tensorflow.nn.softmax", "tensorflow.nn.max_pool", "tensorflow.global_variables_initializer", "tensorflow.constant", "numpy.zeros", "numpy.argmax", "matplotlib.pyplot.subplots", "sklearn.metrics.confusion_matrix", "tensorflow.cast", "numpy.arange", "numpy.max", "numpy.min", "tensorflow.Session", "matplotlib.pyplot.colorbar", "tensorflow.placeholder", "tensorflow.equal", "tensorflow.truncated_normal", "tensorflow.train.AdamOptimizer", "tensorflow.reduce_mean", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.nn.conv2d", "matplotlib.pyplot.show", "tensorflow.argmax", "tensorflow.nn.relu", "matplotlib.pyplot.xlabel" ] ]
tiancity-NJU/REID
[ "125a520a9c0b94440a7757e6f3c3c8bf976906ec" ]
[ "script/bfe.py" ]
[ "# encoding: utf-8\nimport os\nimport sys\nfrom os import path as osp\nfrom pprint import pprint\n\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\n\nsys.path.insert(0,os.path.abspath(os.path.dirname(__file__)+os.sep+'..'))\n\nfrom config import opt\nfrom datasets import data_manager\nfrom datasets.data_loader import ImageData\nfrom datasets.samplers import RandomIdentitySampler\nfrom models.networks import ResNetBuilder, IDE, Resnet, BFE\n#from models.BFE import BFE\nfrom trainers.evaluator import ResNetEvaluator\nfrom trainers.trainer import cls_tripletTrainer\nfrom utils.loss import CrossEntropyLabelSmooth, TripletLoss, Margin\nfrom utils.LiftedStructure import LiftedStructureLoss\nfrom utils.DistWeightDevianceLoss import DistWeightBinDevianceLoss\nfrom utils.serialization import Logger, save_checkpoint\nfrom utils.transforms import TestTransform, TrainTransform\n\n\ndef train(**kwargs):\n opt._parse(kwargs)\n opt.model_name = 'bfe'\n # set random seed and cudnn benchmark\n torch.manual_seed(opt.seed)\n os.makedirs(opt.save_dir, exist_ok=True)\n use_gpu = torch.cuda.is_available()\n sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))\n\n print('=========user config==========')\n pprint(opt._state_dict())\n print('============end===============')\n\n if use_gpu:\n print('currently using GPU')\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(opt.seed)\n else:\n print('currently using cpu')\n\n print('initializing dataset {}'.format(opt.dataset))\n dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)\n\n pin_memory = True if use_gpu else False\n\n summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))\n\n trainloader = DataLoader(\n ImageData(dataset.train, TrainTransform(opt.datatype)),\n sampler=RandomIdentitySampler(dataset.train, opt.num_instances),\n batch_size=opt.train_batch, num_workers=opt.workers,\n pin_memory=pin_memory, drop_last=True\n )\n\n queryloader = DataLoader(\n ImageData(dataset.query, TestTransform(opt.datatype)),\n batch_size=opt.test_batch, num_workers=opt.workers,\n pin_memory=pin_memory\n )\n\n galleryloader = DataLoader(\n ImageData(dataset.gallery, TestTransform(opt.datatype)),\n batch_size=opt.test_batch, num_workers=opt.workers,\n pin_memory=pin_memory\n )\n queryFliploader = DataLoader(\n ImageData(dataset.query, TestTransform(opt.datatype, True)),\n batch_size=opt.test_batch, num_workers=opt.workers,\n pin_memory=pin_memory\n )\n\n galleryFliploader = DataLoader(\n ImageData(dataset.gallery, TestTransform(opt.datatype, True)),\n batch_size=opt.test_batch, num_workers=opt.workers,\n pin_memory=pin_memory\n )\n\n print('initializing model ...')\n\n\n model = BFE(dataset.num_train_pids, 1.0, 0.33)\n\n\n optim_policy = model.get_optim_policy()\n\n if opt.pretrained_model:\n state_dict = torch.load(opt.pretrained_model)['state_dict']\n # state_dict = {k: v for k, v in state_dict.items() \\\n # if not ('reduction' in k or 'softmax' in k)}\n model.load_state_dict(state_dict, False)\n print('load pretrained model ' + opt.pretrained_model)\n print('model size: {:.5f}M'.format(sum(p.numel() for p in model.parameters()) / 1e6))\n\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n reid_evaluator = ResNetEvaluator(model)\n\n if opt.evaluate:\n reid_evaluator.evaluate(queryloader, galleryloader,\n queryFliploader, galleryFliploader, re_ranking=opt.re_ranking, savefig=opt.savefig)\n return\n\n # 
xent_criterion = nn.CrossEntropyLoss()\n xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)\n\n if opt.loss == 'triplet':\n embedding_criterion = TripletLoss(opt.margin)\n elif opt.loss == 'lifted':\n embedding_criterion = LiftedStructureLoss(hard_mining=True)\n elif opt.loss == 'weight':\n embedding_criterion = Margin()\n\n def criterion(triplet_y, softmax_y, labels):\n losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \\\n [xent_criterion(output, labels) for output in softmax_y]\n loss = sum(losses)\n return loss\n\n # get optimizer\n if opt.optim == \"sgd\":\n optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9, weight_decay=opt.weight_decay)\n else:\n optimizer = torch.optim.Adam(optim_policy, lr=opt.lr, weight_decay=opt.weight_decay)\n\n start_epoch = opt.start_epoch\n # get trainer and evaluator\n reid_trainer = cls_tripletTrainer(opt, model, optimizer, criterion, summary_writer)\n\n def adjust_lr(optimizer, ep):\n if ep < 50:\n lr = 1e-4 * (ep // 5 + 1)\n elif ep < 200:\n lr = 1e-3\n elif ep < 300:\n lr = 1e-4\n else:\n lr = 1e-5\n for p in optimizer.param_groups:\n p['lr'] = lr\n\n # start training\n best_rank1 = opt.best_rank\n best_epoch = 0\n for epoch in range(start_epoch, opt.max_epoch):\n if opt.adjust_lr:\n adjust_lr(optimizer, epoch + 1)\n reid_trainer.train(epoch, trainloader)\n\n # skip if not save model\n if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (epoch + 1) == opt.max_epoch:\n if opt.mode == 'class':\n rank1 = test(model, queryloader)\n else:\n rank1 = reid_evaluator.evaluate(queryloader, galleryloader, queryFliploader, galleryFliploader)\n is_best = rank1 > best_rank1\n if is_best:\n best_rank1 = rank1\n best_epoch = epoch + 1\n\n if use_gpu:\n state_dict = model.module.state_dict()\n else:\n state_dict = model.state_dict()\n save_checkpoint({'state_dict': state_dict, 'epoch': epoch + 1},\n is_best=is_best, save_dir=opt.save_dir,\n filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')\n\n print('Best rank-1 {:.1%}, achived at epoch {}'.format(best_rank1, best_epoch))\n\n\ndef test(model, queryloader):\n model.eval()\n correct = 0\n with torch.no_grad():\n for data, target, _ in queryloader:\n output = model(data).cpu()\n # get the index of the max log-probability\n pred = output.max(1, keepdim=True)[1]\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n rank1 = 100. * correct / len(queryloader.dataset)\n print('\\nTest set: Accuracy: {}/{} ({:.2f}%)\\n'.format(correct, len(queryloader.dataset), rank1))\n return rank1\n\n\nif __name__ == '__main__':\n import fire\n\n fire.Fire()\n" ]
[ [ "torch.cuda.manual_seed_all", "torch.optim.SGD", "torch.load", "torch.manual_seed", "torch.no_grad", "torch.optim.Adam", "torch.cuda.is_available", "torch.nn.DataParallel" ] ]
mathildebadoual/pandapower
[ "9ba4bcb78e84b644d2ba6df0c08e285c54af8ddc" ]
[ "pandapower/build_gen.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\r\n# and Energy System Technology (IEE), Kassel. All rights reserved.\r\n\r\n\r\nimport numpy as np\r\nimport numpy.core.numeric as ncn\r\nfrom numpy import array, zeros, isnan\r\nfrom pandas import DataFrame\r\nfrom pandapower.idx_bus import PV, REF, VA, VM, BUS_TYPE, NONE, VMAX, VMIN, PQ\r\nfrom pandapower.idx_gen import QMIN, QMAX, PMIN, PMAX, GEN_STATUS, GEN_BUS, PG, VG, QG\r\n\r\n\r\ndef _build_gen_ppc(net, ppc):\r\n '''\r\n Takes the empty ppc network and fills it with the gen values. The gen\r\n datatype will be float afterwards.\r\n\r\n **INPUT**:\r\n **net** -The pandapower format network\r\n\r\n **ppc** - The PYPOWER format network to fill in values\r\n '''\r\n\r\n mode = net[\"_options\"][\"mode\"]\r\n\r\n # if mode == power flow or short circuit...\r\n if mode == \"pf\" or mode == \"sc\":\r\n\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n eg_is_mask = _is_elements['ext_grid']\r\n gen_is_mask = _is_elements['gen']\r\n\r\n eg_end = np.sum(eg_is_mask)\r\n gen_end = eg_end + np.sum(gen_is_mask)\r\n xw_end = gen_end + len(net[\"xward\"])\r\n\r\n # define default q limits\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n p_lim_default = 1e9\r\n\r\n _init_ppc_gen(ppc, xw_end, 0)\r\n if mode == \"sc\":\r\n return\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n _build_pp_gen(net, ppc, gen_is_mask, eg_end, gen_end, q_lim_default, p_lim_default)\r\n\r\n _build_pp_ext_grid(net, ppc, eg_is_mask, eg_end)\r\n\r\n # add extended ward pv node data\r\n if xw_end > gen_end:\r\n _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default)\r\n\r\n # if mode == optimal power flow...\r\n if mode == \"opf\":\r\n\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n\r\n if len(net.dcline) > 0:\r\n ppc[\"dcline\"] = net.dcline[[\"loss_kw\", \"loss_percent\"]].values\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n eg_is = net[\"ext_grid\"][_is_elements['ext_grid']]\r\n gen_is = net[\"gen\"][_is_elements['gen']]\r\n sg_is = net.sgen[(net.sgen.in_service & net.sgen.controllable) == True] \\\r\n if \"controllable\" in net.sgen.columns else DataFrame()\r\n l_is = net.load[(net.load.in_service & net.load.controllable) == True] \\\r\n if \"controllable\" in net.load.columns else DataFrame()\r\n stor_is = net.storage[(net.storage.in_service & net.storage.controllable) == True] \\\r\n if \"controllable\" in net.storage.columns else DataFrame()\r\n\r\n _is_elements[\"sgen_controllable\"] = sg_is\r\n _is_elements[\"load_controllable\"] = l_is\r\n _is_elements[\"storage_controllable\"] = stor_is\r\n eg_end = len(eg_is)\r\n gen_end = eg_end + len(gen_is)\r\n sg_end = gen_end + len(sg_is)\r\n l_end = sg_end + len(l_is)\r\n stor_end = l_end + len(stor_is)\r\n\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n p_lim_default = 1e9 # changes must be considered in check_opf_data\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n # initialize generator matrix\r\n ppc[\"gen\"] = zeros(shape=(stor_end, 21), dtype=float)\r\n ppc[\"gen\"][:] = array([0, 0, 0, q_lim_default, -q_lim_default, 1., 1., 1, p_lim_default,\r\n -p_lim_default, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n\r\n # add sgens first so pv bus types won't be overwritten\r\n if sg_end > gen_end:\r\n gen_buses = 
bus_lookup[sg_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][gen_end:sg_end, GEN_BUS] = gen_buses\r\n ppc[\"gen\"][gen_end:sg_end, PG] = - sg_is[\"p_kw\"].values * 1e-3 * sg_is[\"scaling\"].values\r\n ppc[\"gen\"][gen_end:sg_end, QG] = sg_is[\"q_kvar\"].values * 1e-3 * sg_is[\"scaling\"].values\r\n\r\n # set bus values for generator buses\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable sgens\r\n if \"min_q_kvar\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, QMAX] = - (sg_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][gen_end:sg_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][gen_end:sg_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, QMIN] = - (sg_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][gen_end:sg_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][gen_end:sg_end, [QMAX]] = min_q_kvar - 1e-10 # TODO Why this? (M.Scharf, 2018-02)\r\n\r\n if \"max_p_kw\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, PMIN] = - (sg_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][gen_end:sg_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][gen_end:sg_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, PMAX] = - (sg_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][gen_end:sg_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][gen_end:sg_end, [PMAX]] = min_p_kw\r\n\r\n # add controllable loads\r\n if l_end > sg_end:\r\n load_buses = bus_lookup[l_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][sg_end:l_end, GEN_BUS] = load_buses\r\n ppc[\"gen\"][sg_end:l_end, PG] = - l_is[\"p_kw\"].values * 1e-3 * l_is[\"scaling\"].values\r\n ppc[\"gen\"][sg_end:l_end, QG] = l_is[\"q_kvar\"].values * 1e-3 * l_is[\"scaling\"].values\r\n\r\n # set bus values for controllable loads\r\n ppc[\"bus\"][load_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable loads\r\n if \"min_q_kvar\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, QMAX] = - (l_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][sg_end:l_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][sg_end:l_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, QMIN] = - (l_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][sg_end:l_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][sg_end:l_end, [QMAX]] = min_q_kvar\r\n\r\n if \"min_p_kw\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, PMIN] = - (l_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][sg_end:l_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][sg_end:l_end, [PMIN]] = max_p_kw\r\n\r\n if \"max_p_kw\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, PMAX] = - (l_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][sg_end:l_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][sg_end:l_end, [PMAX]] = min_p_kw\r\n\r\n # add controllable storages\r\n if stor_end > l_end:\r\n stor_buses = bus_lookup[stor_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][l_end:stor_end, GEN_BUS] = stor_buses\r\n ppc[\"gen\"][l_end:stor_end, 
PG] = - stor_is[\"p_kw\"].values * 1e-3 * stor_is[\"scaling\"].values\r\n ppc[\"gen\"][l_end:stor_end, QG] = stor_is[\"q_kvar\"].values * 1e-3 * stor_is[\"scaling\"].values\r\n\r\n # set bus values for generator buses\r\n ppc[\"bus\"][stor_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable sgens\r\n if \"min_q_kvar\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, QMAX] = - (stor_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][l_end:stor_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][l_end:stor_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, QMIN] = - (stor_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][l_end:stor_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][l_end:stor_end, [QMAX]] = min_q_kvar\r\n\r\n if \"max_p_kw\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, PMIN] = - (stor_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][l_end:stor_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][l_end:stor_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, PMAX] = - (stor_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][l_end:stor_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][l_end:stor_end, [PMAX]] = min_p_kw\r\n\r\n # add ext grid / slack data\r\n ppc[\"gen\"][:eg_end, GEN_BUS] = bus_lookup[eg_is[\"bus\"].values]\r\n ppc[\"gen\"][:eg_end, VG] = eg_is[\"vm_pu\"].values\r\n ppc[\"gen\"][:eg_end, GEN_STATUS] = eg_is[\"in_service\"].values\r\n if \"max_p_kw\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, PMIN] = - (eg_is[\"max_p_kw\"].values * 1e-3 - delta)\r\n max_p_kw = ppc[\"gen\"][:eg_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][:eg_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, PMAX] = - (eg_is[\"min_p_kw\"].values * 1e-3 + delta)\r\n min_p_kw = ppc[\"gen\"][:eg_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][:eg_end, [PMAX]] = min_p_kw\r\n\r\n if \"min_q_kvar\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, QMAX] = - (eg_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][:eg_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][:eg_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, QMIN] = - (eg_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][:eg_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][:eg_end, [QMAX]] = min_q_kvar - 1e-10\r\n\r\n # set bus values for external grid buses\r\n eg_buses = bus_lookup[eg_is[\"bus\"].values]\r\n if calculate_voltage_angles:\r\n ppc[\"bus\"][eg_buses, VA] = eg_is[\"va_degree\"].values\r\n ppc[\"bus\"][eg_buses, BUS_TYPE] = REF\r\n ppc[\"bus\"][eg_buses, VM] = eg_is[\"vm_pu\"].values\r\n\r\n # REF busses don't have flexible voltages by definition:\r\n ppc[\"bus\"][eg_buses, VMAX] = ppc[\"bus\"][ppc[\"bus\"][:, BUS_TYPE] == REF, VM]\r\n ppc[\"bus\"][eg_buses, VMIN] = ppc[\"bus\"][ppc[\"bus\"][:, BUS_TYPE] == REF, VM]\r\n\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n ppc[\"gen\"][eg_end:gen_end, GEN_BUS] = 
bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"gen\"][eg_end:gen_end, PG] = - gen_is[\"p_kw\"].values * 1e-3 * gen_is[\"scaling\"].values\r\n ppc[\"gen\"][eg_end:gen_end, VG] = gen_is[\"vm_pu\"].values\r\n\r\n # set bus values for generator buses\r\n gen_buses = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PV\r\n ppc[\"bus\"][gen_buses, VM] = gen_is[\"vm_pu\"].values\r\n\r\n # set constraints for PV generators\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, _is_elements['gen'])\r\n _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, _is_elements['gen'])\r\n\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default)\r\n\r\n\r\ndef _init_ppc_gen(ppc, xw_end, q_lim_default):\r\n # initialize generator matrix\r\n ppc[\"gen\"] = np.zeros(shape=(xw_end, 21), dtype=float)\r\n ppc[\"gen\"][:] = np.array([0, 0, 0, q_lim_default, -q_lim_default, 1.,\r\n 1., 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n\r\n\r\ndef _build_pp_ext_grid(net, ppc, eg_is_mask, eg_end):\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n # add ext grid / slack data\r\n eg_buses = bus_lookup[net[\"ext_grid\"][\"bus\"].values[eg_is_mask]]\r\n ppc[\"gen\"][:eg_end, GEN_BUS] = eg_buses\r\n ppc[\"gen\"][:eg_end, VG] = net[\"ext_grid\"][\"vm_pu\"].values[eg_is_mask]\r\n ppc[\"gen\"][:eg_end, GEN_STATUS] = True\r\n\r\n # set bus values for external grid buses\r\n if calculate_voltage_angles:\r\n ppc[\"bus\"][eg_buses, VA] = net[\"ext_grid\"][\"va_degree\"].values[eg_is_mask]\r\n ppc[\"bus\"][eg_buses, BUS_TYPE] = REF\r\n # _build_gen_lookups(net, \"ext_grid\", 0, eg_end)\r\n\r\n\r\ndef _build_pp_gen(net, ppc, gen_is_mask, eg_end, gen_end, q_lim_default, p_lim_default):\r\n\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\r\n\r\n gen_buses = bus_lookup[net[\"gen\"][\"bus\"].values[gen_is_mask]]\r\n gen_is_vm = net[\"gen\"][\"vm_pu\"].values[gen_is_mask]\r\n ppc[\"gen\"][eg_end:gen_end, GEN_BUS] = gen_buses\r\n ppc[\"gen\"][eg_end:gen_end, PG] = - (net[\"gen\"][\"p_kw\"].values[gen_is_mask] * 1e-3 *\r\n net[\"gen\"][\"scaling\"].values[gen_is_mask])\r\n ppc[\"gen\"][eg_end:gen_end, VG] = gen_is_vm\r\n\r\n # set bus values for generator buses\r\n\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PV\r\n ppc[\"bus\"][gen_buses, VM] = gen_is_vm\r\n\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n\r\n if copy_constraints_to_ppc:\r\n _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default)\r\n\r\n # _build_gen_lookups(net, \"gen\", eg_end, gen_end)\r\n\r\n\r\ndef _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default, update_lookup=True):\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n xw = net[\"xward\"]\r\n xw_is = net[\"_is_elements\"]['xward']\r\n if update_lookup:\r\n ppc[\"gen\"][gen_end:xw_end, GEN_BUS] = bus_lookup[xw[\"ad_bus\"].values]\r\n ppc[\"gen\"][gen_end:xw_end, VG] = xw[\"vm_pu\"].values\r\n ppc[\"gen\"][gen_end:xw_end, GEN_STATUS] = xw_is\r\n ppc[\"gen\"][gen_end:xw_end, QMIN] = -q_lim_default\r\n ppc[\"gen\"][gen_end:xw_end, QMAX] = q_lim_default\r\n\r\n xward_buses = bus_lookup[net[\"xward\"][\"ad_bus\"].values]\r\n 
ppc[\"bus\"][xward_buses[xw_is], BUS_TYPE] = PV\r\n ppc[\"bus\"][xward_buses[~xw_is], BUS_TYPE] = NONE\r\n ppc[\"bus\"][xward_buses, VM] = net[\"xward\"][\"vm_pu\"].values\r\n\r\n\r\n\r\n\r\ndef _update_gen_ppc(net, ppc):\r\n '''\r\n Takes the ppc network and updates the gen values from the values in net.\r\n\r\n **INPUT**:\r\n **net** -The pandapower format network\r\n\r\n **ppc** - The PYPOWER format network to fill in values\r\n '''\r\n # get options from net\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n gen_is_mask = _is_elements['gen']\r\n # TODO maybe speed up things here, too\r\n eg_is = net[\"ext_grid\"][_is_elements['ext_grid']]\r\n gen_is = net[\"gen\"][_is_elements['gen']]\r\n\r\n eg_end = len(eg_is)\r\n gen_end = eg_end + len(gen_is)\r\n xw_end = gen_end + len(net[\"xward\"])\r\n\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n\r\n # add ext grid / slack data\r\n ext_grid_lookup = net[\"_pd2ppc_lookups\"][\"ext_grid\"]\r\n ext_grid_idx_ppc = ext_grid_lookup[eg_is.index]\r\n ppc[\"gen\"][ext_grid_idx_ppc, VG] = eg_is[\"vm_pu\"].values\r\n ppc[\"gen\"][ext_grid_idx_ppc, GEN_STATUS] = eg_is[\"in_service\"].values\r\n\r\n # set bus values for external grid buses\r\n if calculate_voltage_angles:\r\n # eg_buses = bus_lookup[eg_is[\"bus\"].values]\r\n ppc[\"bus\"][ext_grid_idx_ppc, VA] = eg_is[\"va_degree\"].values\r\n\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n gen_lookup = net[\"_pd2ppc_lookups\"][\"gen\"]\r\n gen_idx_ppc = gen_lookup[gen_is.index]\r\n ppc[\"gen\"][gen_idx_ppc, PG] = - gen_is[\"p_kw\"].values * 1e-3 * gen_is[\"scaling\"].values\r\n ppc[\"gen\"][gen_idx_ppc, VG] = gen_is[\"vm_pu\"].values\r\n\r\n # set bus values for generator buses\r\n gen_buses = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"bus\"][gen_buses, VM] = gen_is[\"vm_pu\"].values\r\n\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n\r\n # add extended ward pv node data\r\n if xw_end > gen_end:\r\n # ToDo: this must be tested in combination with recycle. Maybe the placement of the updated value in ppc[\"gen\"]\r\n # ToDo: is wrong. 
-> I'll better raise en error\r\n raise NotImplementedError(\"xwards in combination with recycle is not properly implemented\")\r\n # _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default,\r\n # update_lookup=False)\r\n\r\n\r\ndef _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask):\r\n # Note: Pypower has generator reference system, pandapower uses load reference\r\n # system (max <-> min)\r\n\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n if \"max_q_kvar\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, QMIN] = -net[\"gen\"][\"max_q_kvar\"].values[gen_is_mask] * 1e-3 - delta\r\n if \"min_q_kvar\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, QMAX] = -net[\"gen\"][\"min_q_kvar\"].values[gen_is_mask] * 1e-3 + delta\r\n\r\n\r\ndef _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask):\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n if \"max_p_kw\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, PMIN] = -net[\"gen\"][\"max_p_kw\"].values[gen_is_mask] * 1e-3 + delta\r\n if \"min_p_kw\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, PMAX] = -net[\"gen\"][\"min_p_kw\"].values[gen_is_mask] * 1e-3 - delta\r\n\r\n\r\ndef _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default):\r\n # Note: Pypower has generator reference system, pandapower uses load reference system (max <-> min)\r\n max_q_kvar = ppc[\"gen\"][eg_end:gen_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=np.isnan(max_q_kvar))\r\n ppc[\"gen\"][eg_end:gen_end, [QMIN]] = max_q_kvar\r\n\r\n min_q_kvar = ppc[\"gen\"][eg_end:gen_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=np.isnan(min_q_kvar))\r\n ppc[\"gen\"][eg_end:gen_end, [QMAX]] = min_q_kvar\r\n\r\n\r\ndef _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default):\r\n # Note: Pypower has generator reference system, pandapower uses load reference system (max <-> min)\r\n max_p_kw = ppc[\"gen\"][eg_end:gen_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][eg_end:gen_end, [PMIN]] = max_p_kw\r\n\r\n min_p_kw = ppc[\"gen\"][eg_end:gen_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][eg_end:gen_end, [PMAX]] = min_p_kw\r\n\r\n\r\ndef _check_voltage_setpoints_at_same_bus(ppc):\r\n # generator buses:\r\n gen_bus = ppc['gen'][:, GEN_BUS].astype(int)\r\n # generator setpoints:\r\n gen_vm = ppc['gen'][:, VG]\r\n if _different_values_at_one_bus(gen_bus, gen_vm):\r\n raise UserWarning(\"Generators with different voltage setpoints connected to the same bus\")\r\n\r\ndef _check_voltage_angles_at_same_bus(net, ppc):\r\n gen_va = net.ext_grid.va_degree[net._is_elements[\"ext_grid\"]].values\r\n eg_gens = net._pd2ppc_lookups[\"ext_grid\"][net.ext_grid.index[net._is_elements[\"ext_grid\"]]]\r\n gen_bus = ppc[\"gen\"][eg_gens, GEN_BUS].astype(int)\r\n if _different_values_at_one_bus(gen_bus, gen_va):\r\n raise UserWarning(\"Ext grids with different voltage angle setpoints connected to the same bus\")\r\n\r\n\r\ndef _different_values_at_one_bus(buses, values):\r\n \"\"\"\r\n checks if there are different values in any of the\r\n\r\n \"\"\"\r\n # buses with one or more generators and their index\r\n unique_bus, index_first_bus = np.unique(buses, return_index=True)\r\n\r\n # voltage setpoint lookup with the voltage of the first occurence of that bus\r\n first_values = -np.ones(buses.max() + 1)\r\n first_values[unique_bus] = values[index_first_bus]\r\n\r\n # generate voltage 
setpoints where all generators at the same bus\r\n # have the voltage of the first generator at that bus\r\n values_equal = first_values[buses]\r\n\r\n return not np.array_equal(values, values_equal)\r\n" ]
[ [ "numpy.sum", "numpy.zeros", "pandas.DataFrame", "numpy.array_equal", "numpy.array", "numpy.unique", "numpy.isnan" ] ]
miquelmn/nn_interpretability
[ "2b5d2b4102016189743e09f1f3a56f2ecddfde98" ]
[ "nn_interpretability/model/model_repository.py" ]
[ "import os\nimport torch\nfrom pathlib import Path\n\nfrom nn_interpretability.model.definition.am_mnist_classifier import AMCNN\nfrom nn_interpretability.model.definition.mc_dropout_cnn import CNN_Dropout\nfrom nn_interpretability.model.definition.general_mnist_cnn import GeneralCNN\nfrom nn_interpretability.model.definition.mnist_generator import MNISTGenerator\nfrom nn_interpretability.model.definition.mnist_discriminator import MNISTDiscriminator\nfrom nn_interpretability.model.definition.cam_mnist_classifier import CAMMNISTClassifier\nfrom nn_interpretability.model.definition.pretrained_dc_generator import PretrainedDCGANGenerator\nfrom nn_interpretability.model.definition.cam_mnist_classifier_2 import CAMMNISTExtendedClassifier\n\n\nclass ModelRepository:\n MODELS_PATH = str(Path(__file__).parent.parent.parent.joinpath('models')) + \"/\"\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n @staticmethod\n def get_general_mnist_cnn(path: str = None):\n model = GeneralCNN()\n\n if path is not None:\n if os.path.exists(ModelRepository.MODELS_PATH + path):\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_cnn_dropout(path: str = None):\n model = CNN_Dropout()\n\n if path is not None:\n if os.path.exists(ModelRepository.MODELS_PATH + path):\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n \n @staticmethod\n def get_cam_classifier(path: str = None):\n model = CAMMNISTClassifier()\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_cam_extended_classifier(path: str = None):\n model = CAMMNISTExtendedClassifier()\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_am_classifier(path: str = None):\n model = AMCNN()\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_pretrained_dcgan_generator():\n \"\"\"\n Source of the pretrained model is:\n\n https://github.com/csinva/gan-vae-pretrained-pytorch\n :return:\n \"\"\"\n path = 'pretrained_dcgan_generator.pth'\n\n model = PretrainedDCGANGenerator()\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_mnist_generator(latent_dim: int = 128, path: str = None):\n model = MNISTGenerator(latent_dim=latent_dim)\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def get_mnist_discriminator(path: str = None):\n model = MNISTDiscriminator()\n\n if path is not None:\n model = ModelRepository._load(model, path)\n\n return model.to(ModelRepository.device)\n\n @staticmethod\n def save(model, model_name):\n torch.save(model.state_dict(), ModelRepository.MODELS_PATH + model_name)\n return model\n\n @staticmethod\n def _load(model, model_name):\n model.load_state_dict(torch.load(ModelRepository.MODELS_PATH + model_name, map_location=ModelRepository.device))\n return model.to(ModelRepository.device)\n" ]
[ [ "torch.cuda.is_available", "torch.load" ] ]
tuahk/NiaPy
[ "c863d801fda8e1949a3ca716a4de7c7ca3d0ea16", "c863d801fda8e1949a3ca716a4de7c7ca3d0ea16" ]
[ "NiaPy/algorithms/basic/gso.py", "NiaPy/algorithms/basic/ba.py" ]
[ "# encoding=utf8\n# pylint: disable=mixed-indentation, trailing-whitespace, line-too-long, multiple-statements, attribute-defined-outside-init, logging-not-lazy, no-self-use, redefined-builtin, singleton-comparison, unused-argument, arguments-differ, no-else-return\nimport logging\nfrom scipy.spatial.distance import euclidean\nfrom numpy import full, apply_along_axis, argmin, copy, sum, inf, fmax, pi, where\nfrom NiaPy.algorithms.algorithm import Algorithm\n\nlogging.basicConfig()\nlogger = logging.getLogger('NiaPy.algorithms.basic')\nlogger.setLevel('INFO')\n\n__all__ = ['GlowwormSwarmOptimization', 'GlowwormSwarmOptimizationV1', 'GlowwormSwarmOptimizationV2', 'GlowwormSwarmOptimizationV3']\n\nclass GlowwormSwarmOptimization(Algorithm):\n\tr\"\"\"Implementation of glowwarm swarm optimization.\n\n\t**Algorithm:** Glowwarm Swarm Optimization Algorithm\n\n\t**Date:** 2018\n\n\t**Authors:** Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference URL:** https://www.springer.com/gp/book/9783319515946\n\n\t**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.\n\t\"\"\"\n\tdef __init__(self, **kwargs):\n\t\tif kwargs.get('name', None) == None: Algorithm.__init__(self, name='GlowwormSwarmOptimization', sName='GSO', **kwargs)\n\t\telse: Algorithm.__init__(self, **kwargs)\n\n\tdef setParameters(self, n=25, l0=5, nt=5, rho=0.4, gamma=0.6, beta=0.08, s=0.03, **ukwargs):\n\t\tr\"\"\"Set the arguments of an algorithm.\n\n\t\t**Arguments:**\n\n\t\tn {integer} -- number of glowworms in population\n\n\t\tl0 {real} -- initial luciferin quantity for each glowworm\n\n\t\tnt {real} --\n\n\t\trs {real} -- maximum sensing range\n\n\t\trho {real} -- luciferin decay constant\n\n\t\tgamma {real} -- luciferin enhancement constant\n\n\t\tbeta {real} --\n\n\t\ts {real} --\n\t\t\"\"\"\n\t\tself.n, self.l0, self.nt, self.rho, self.gamma, self.beta, self.s = n, l0, nt, rho, gamma, beta, s\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef randMove(self, i):\n\t\tj = i\n\t\twhile i == j: j = self.randint(self.n)\n\t\treturn j\n\n\tdef getNeighbors(self, i, r, GS, L):\n\t\tN = full(self.n, 0)\n\t\tfor j, gw in enumerate(GS): N[j] = 1 if i != j and euclidean(GS[i], gw) <= r and L[i] >= L[j] else 0\n\t\treturn N\n\n\tdef probabilityes(self, i, N, L):\n\t\td, P = sum(L[where(N == 1)] - L[i]), full(self.n, .0)\n\t\tfor j in range(self.n): P[i] = ((L[j] - L[i]) / d) if N[j] == 1 else 0\n\t\treturn P\n\n\tdef moveSelect(self, pb, i):\n\t\tr, b_l, b_u = self.rand(), 0, 0\n\t\tfor j in range(self.n):\n\t\t\tb_l, b_u = b_u, b_u + pb[i]\n\t\t\tif b_l < r < b_u: return j\n\t\treturn self.randint(self.n)\n\n\tdef calcLuciferin(self, L, GS_f): return (1 - self.rho) * L + self.gamma * GS_f\n\n\tdef rangeUpdate(self, R, N, rs): return R + self.beta * (self.nt - sum(N))\n\n\tdef getBest(self, GS, GS_f, xb, xb_f):\n\t\tib = argmin(GS_f)\n\t\tif GS_f[ib] < xb_f: return GS[ib], GS_f[ib]\n\t\telse: return xb, xb_f\n\n\tdef runTask(self, task):\n\t\trs = euclidean(full(task.D, 0), task.bRange)\n\t\tGS, GS_f, L, R = self.uniform(task.Lower, task.Upper, [self.n, task.D]), full(self.n, inf), full(self.n, self.l0), full(self.n, rs)\n\t\txb, xb_f = None, inf\n\t\twhile not task.stopCondI():\n\t\t\tGSo, Ro, GS_f = copy(GS), copy(R), apply_along_axis(task.eval, 1, GS)\n\t\t\txb, xb_f = self.getBest(GS, GS_f, xb, xb_f)\n\t\t\tL = self.calcLuciferin(L, GS_f)\n\t\t\tN = [self.getNeighbors(i, Ro[i], GSo, L) for i in 
range(self.n)]\n\t\t\tP = [self.probabilityes(i, N[i], L) for i in range(self.n)]\n\t\t\tj = [self.moveSelect(P[i], i) for i in range(self.n)]\n\t\t\tfor i in range(self.n): GS[i] = task.repair(GSo[i] + self.s * ((GSo[j[i]] - GSo[i]) / (euclidean(GSo[j[i]], GSo[i]) + 1e-31)))\n\t\t\tfor i in range(self.n): R[i] = max(0, min(rs, self.rangeUpdate(Ro[i], N[i], rs)))\n\t\treturn xb, xb_f\n\nclass GlowwormSwarmOptimizationV1(GlowwormSwarmOptimization):\n\tr\"\"\"Implementation of glowwarm swarm optimization.\n\n\t**Algorithm:** Glowwarm Swarm Optimization Algorithm\n\n\t**Date:** 2018\n\n\t**Authors:** Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference URL:** https://www.springer.com/gp/book/9783319515946\n\n\t**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.\n\t\"\"\"\n\tdef __init__(self, **kwargs): GlowwormSwarmOptimization.__init__(self, name='GlowwormSwarmOptimizationV1', sName='GSOv1', **kwargs)\n\n\tdef setParameters(self, **kwargs):\n\t\tself.__setParams(**kwargs)\n\t\tGlowwormSwarmOptimization.setParameters(self, **kwargs)\n\n\tdef __setParams(self, alpha=0.2, **ukwargs):\n\t\tr\"\"\"Set the arguments of an algorithm.\n\n\t\t**Arguments:**\n\n\t\talpha {real} --\n\t\t\"\"\"\n\t\tself.alpha = alpha\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef calcLuciferin(self, L, GS_f): return fmax(0, (1 - self.rho) * L + self.gamma * GS_f)\n\n\tdef rangeUpdate(self, R, N, rs): return rs / (1 + self.beta * (sum(N) / (pi * rs ** 2)))\n\nclass GlowwormSwarmOptimizationV2(GlowwormSwarmOptimization):\n\tr\"\"\"Implementation of glowwarm swarm optimization.\n\n\t**Algorithm:** Glowwarm Swarm Optimization Algorithm\n\n\t**Date:** 2018\n\n\t**Authors:** Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference URL:** https://www.springer.com/gp/book/9783319515946\n\n\t**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. Springer, 2017.\n\t\"\"\"\n\tdef __init__(self, **kwargs): GlowwormSwarmOptimization.__init__(self, name='GlowwormSwarmOptimizationV2', sName='GSOv2', **kwargs)\n\n\tdef setParameters(self, **kwargs):\n\t\tself.__setParams(alpha=kwargs.pop('alpha', 0.2), **kwargs)\n\t\tGlowwormSwarmOptimization.setParameters(self, **kwargs)\n\n\tdef __setParams(self, alpha=0.2, **ukwargs):\n\t\tr\"\"\"Set the arguments of an algorithm.\n\n\t\t**Arguments:**\n\n\t\tbeta1 {real} --\n\n\t\ts {real} --\n\t\t\"\"\"\n\t\tself.alpha = alpha\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef rangeUpdate(self, P, N, rs): return self.alpha + (rs - self.alpha) / (1 + self.beta * sum(N))\n\nclass GlowwormSwarmOptimizationV3(GlowwormSwarmOptimization):\n\tr\"\"\"Implementation of glowwarm swarm optimization.\n\n\t**Algorithm:** Glowwarm Swarm Optimization Algorithm\n\n\t**Date:** 2018\n\n\t**Authors:** Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference URL:** https://www.springer.com/gp/book/9783319515946\n\n\t**Reference paper:** Kaipa, Krishnanand N., and Debasish Ghose. Glowworm swarm optimization: theory, algorithms, and applications. Vol. 698. 
Springer, 2017.\n\t\"\"\"\n\tdef __init__(self, **kwargs): GlowwormSwarmOptimization.__init__(self, name='GlowwormSwarmOptimizationV2', sName='GSOv2', **kwargs)\n\n\tdef setParameters(self, **kwargs):\n\t\tself.__setParams(beta1=kwargs.pop('beta1', 0.2), **kwargs)\n\t\tGlowwormSwarmOptimization.setParameters(self, **kwargs)\n\n\tdef __setParams(self, beta1=0.2, **ukwargs):\n\t\tr\"\"\"Set the arguments of an algorithm.\n\n\t\t**Arguments:**\n\n\t\tbeta1 {real} --\n\n\t\ts {real} --\n\t\t\"\"\"\n\t\tself.beta1 = beta1\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef rangeUpdate(self, R, N, rs): return R + (self.beta * sum(N)) if sum(N) < self.nt else (-self.beta1 * sum(N))\n\n# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3\n", "# encoding=utf8\n# pylint: disable=mixed-indentation, multiple-statements, attribute-defined-outside-init, logging-not-lazy, no-self-use, line-too-long, singleton-comparison, arguments-differ\nimport logging\nfrom numpy import full, apply_along_axis, argmin\nfrom NiaPy.algorithms.algorithm import Algorithm\n\nlogging.basicConfig()\nlogger = logging.getLogger('NiaPy.algorithms.basic')\nlogger.setLevel('INFO')\n\n__all__ = ['BatAlgorithm']\n\nclass BatAlgorithm(Algorithm):\n\tr\"\"\"Implementation of Bat algorithm.\n\n\t**Algorithm:** Bat algorithm\n\n\t**Date:** 2015\n\n\t**Authors:** Iztok Fister Jr., Marko Burjek and Klemen Berkovič\n\n\t**License:** MIT\n\n\t**Reference paper:**\n\tYang, Xin-She. \"A new metaheuristic bat-inspired algorithm.\"\n\tNature inspired cooperative strategies for optimization (NICSO 2010).\n\tSpringer, Berlin, Heidelberg, 2010. 65-74.\n\t\"\"\"\n\tdef __init__(self, **kwargs):\n\t\tr\"\"\"**__init__(self, D, NP, nFES, A, r, Qmin, Qmax, benchmark)**.\n\n\t\t**See**:\n\t\tAlgorithm.__init__(self, **kwargs)\n\t\t\"\"\"\n\t\tif kwargs.get('name', None) == None: Algorithm.__init__(self, name=kwargs.get('name', 'BatAlgorithm'), sName=kwargs.get('sName', 'BA'), **kwargs)\n\t\telse: Algorithm.__init__(self, **kwargs)\n\n\tdef setParameters(self, NP, A, r, Qmin, Qmax, **ukwargs):\n\t\tr\"\"\"Set the parameters of the algorithm.\n\n\t\t**Arguments:**\n\n\t\tNP {integer} -- population size\n\n\t\tA {decimal} -- loudness\n\n\t\tr {decimal} -- pulse rate\n\n\t\tQmin {decimal} -- minimum frequency\n\n\t\tQmax {decimal} -- maximum frequency\n\t\t\"\"\"\n\t\tself.NP, self.A, self.r, self.Qmin, self.Qmax = NP, A, r, Qmin, Qmax\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))\n\n\tdef runTask(self, task):\n\t\tr\"\"\"Run algorithm with initialized parameters.\n\n\t\t**Return:**\n\n\t\t{decimal} -- coordinates of minimal found objective function\n\n\t\t{decimal} -- minimal value found of objective function\n\t\t\"\"\"\n\t\tS, Q, v = full([self.NP, task.D], 0.0), full(self.NP, 0.0), full([self.NP, task.D], 0.0)\n\t\tSol = task.Lower + task.bRange * self.uniform(0, 1, [self.NP, task.D])\n\t\tFitness = apply_along_axis(task.eval, 1, Sol)\n\t\tj = argmin(Fitness)\n\t\tbest, f_min = Sol[j], Fitness[j]\n\t\twhile not task.stopCond():\n\t\t\tfor i in range(self.NP):\n\t\t\t\tQ[i] = self.Qmin + (self.Qmax - self.Qmin) * self.uniform(0, 1)\n\t\t\t\tv[i] = v[i] + (Sol[i] - best) * Q[i]\n\t\t\t\tS[i] = Sol[i] + v[i]\n\t\t\t\tS[i] = task.repair(S[i])\n\t\t\t\tif self.rand() > self.r:\n\t\t\t\t\tS[i] = best + 0.001 * self.normal(0, 1, task.D)\n\t\t\t\t\tS[i] = task.repair(S[i])\n\t\t\t\tFnew = task.eval(S[i])\n\t\t\t\tif (Fnew <= Fitness[i]) and (self.rand() < self.A): Sol[i], Fitness[i] = S[i], Fnew\n\t\t\t\tif Fnew <= 
f_min: best, f_min = S[i], Fnew\n\t\treturn best, f_min\n\n# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3\n" ]
[ [ "numpy.sum", "numpy.argmin", "scipy.spatial.distance.euclidean", "numpy.copy", "numpy.apply_along_axis", "numpy.fmax", "numpy.where", "numpy.full" ], [ "numpy.argmin", "numpy.full", "numpy.apply_along_axis" ] ]
d-ks/gym_torcs_kai
[ "b9e1659a18ea8a788d0c6aeb7b1111c0284b23ac" ]
[ "gt_kai.py" ]
[ "# Gym-TORCS-Kai Environment for Reinforcement Learning in TORCS\n# original author : Naoto Yoshida\n# (https://github.com/ugo-nama-kun/gym_torcs)\n# modified version author : Daiko Kishikawa\n#\n# This environment is under modification. (2019.12)\n#\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nimport numpy as np\n\nimport sys\n\nsys.path.append(\"./gym_torcs_kai\")\n\nimport snakeoil3_gym as snakeoil3\n\nimport os\nimport time\n\n\nclass TorcsKaiEnv(gym.Env):\n \n # the speed limit starts when the number of steps exceeds this\n terminal_judge_start = 500\n\n # episode terminates when the car is running slower than this limit\n termination_limit_progress = 5\n \n # whether to initialize when resetting the environment\n initial_reset = True\n\n def __init__(self, throttle=False, gear_change=False):\n \n print(\"=== Hello, this is Gym-TORCS-Kai. ===\")\n \n ############################ PARAMETERS OF DRIVING ############################\n \"\"\" throttle (bool) : usage of the throttle control in TORCS. \"\"\"\n \"\"\" gear_change (bool) : usage of the gear control in TORCS. \"\"\"\n \"\"\" obsdim (int) : the number of observation (state input) dimensions.\"\"\"\n # Currently, three types of dimensions are supported: \"2\", \"31\", \"79\".\n # \"2\" : the minimum number of dimensions required for driving.\n # \"31\" : the number of dimensions required for a single agent to drive normally.\n # \"79\" : the number of dimensions using all available inputs.\n \"\"\" maximum_distance (float) : the maximum distance when finish driving. \"\"\"\n \"\"\" default_speed (float) : the target speed for acceleration/deceleration. \"\"\"\n \n self.throttle = throttle\n self.gear_change = gear_change\n \n self.obsdim = 31\n self.maximum_distance = 1908.32\n self.default_speed = 100\n \n ##################################################################################\n \n print(\"--> throttle : \", self.throttle)\n print(\"--> gear : \", self.gear_change)\n print(\"--> dim. of observ. : \", self.obsdim)\n print(\"--> max. dist. : \", self.maximum_distance, \" m\")\n print(\"--> targ. 
speed : \", self.default_speed, \"km/h\")\n \n # Initialization of the driving in TORCS.\n self.initial_run = True\n \n # variable for calculating Y-axis acceleration\n self.speedY = 0\n self.time = 0\n \n # variable for recording the current number of steps\n self.time_step = 0\n \n # the range of reward function\n self.reward_range = (-10, 10)\n\n self.testmode = False\n\n # lists for recording vehicle status\n self.Yaclist = []\n self.poshis = []\n self.anglehis = []\n self.sphis = []\n \n # launch TORCS system\n os.system(\"pkill torcs\")\n time.sleep(0.5)\n\n if self.obsdim == 79:\n os.system(\"torcs &\")\n else:\n os.system(\"torcs -nofuel -nodamage -nolaptime &\")\n time.sleep(0.5)\n os.system(\"sh ./gym_torcs_kai/autostart.sh\")\n time.sleep(0.5)\n\n \"\"\"\n # Modify here if you use multiple tracks in the environment\n self.client = snakeoil3.Client(p=3101, vision=False) # Open new UDP in vtorcs\n self.client.MAX_STEPS = np.inf\n client = self.client\n client.get_servers_input() # Get the initial input from torcs\n obs = client.S.d # Get the current full-observation from torcs\n \"\"\"\n \n # definitions of action space ranges\n if throttle is False:\n self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,))\n else:\n self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))\n \n # definitions of observation space ranges\n if self.obsdim == 79:\n high = np.array([np.pi, # angle\n np.inf, # curLapTime\n np.inf, # damage\n np.inf, # distFromStart\n np.inf, # distRaced\n\n # focus (5 dim.)\n 200, 200, 200, 200, 200,\n\n np.inf, # fuel\n 6, # gear\n np.inf, # lastLapTime\n\n # opponents (36 dim.)\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200, 200,\n\n np.inf, # racePos\n np.inf, # rpm\n np.inf, # speedX\n np.inf, # speedY\n np.inf, # speedZ\n\n # track (19 dim.)\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200,\n\n np.inf, # trackPos\n\n # wheelSpinVel (4 dim.)\n np.inf, np.inf, np.inf, np.inf,\n\n np.inf, # z\n ])\n\n low = np.array([-np.pi, # angle\n 0, # curLapTime\n 0, # damage\n 0, # distFromStart\n 0, # distRaced\n\n # focus (5 dim.)\n 0, 0, 0, 0, 0,\n\n 0, # fuel\n -1, # gear\n 0, # lastLapTime\n\n # opponents (36 dim.)\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n\n 1, # racePos\n 0, # rpm\n -np.inf, # speedX\n -np.inf, # speedY\n -np.inf, # speedZ\n\n # track (19 dim.)\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0,\n\n -np.inf, # trackPos\n\n # wheelSpinVel (4 dim.)\n 0, 0, 0, 0,\n\n -np.inf, # z\n ])\n\n elif self.obsdim == 2:\n high = np.array([np.pi, # angle\n np.inf]) # trackPos\n\n low = np.array([-np.pi, # angle\n -np.inf]) # trackPos\n\n elif self.obsdim == 31:\n\n high = np.array([np.pi, # angle\n 6, # gear\n np.inf, # rpm\n np.inf, # speedX\n np.inf, # speedY\n np.inf, # speedZ\n # track (19 dim.)\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200, 200,\n 200, 200, 200, 200,\n np.inf, # trackPos\n # wheelSpinVel (4 dim.)\n np.inf, np.inf, np.inf, np.inf,\n np.inf, # z\n ])\n\n low = np.array([-np.pi, # angle\n -1, # gear\n 0, # rpm\n -np.inf, # speedX\n -np.inf, # speedY\n -np.inf, # speedZ\n # track (19 dim.)\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n 0, 0, 0, 0,\n -np.inf, # trackPos\n # wheelSpinVel (4 dim.)\n 0, 0, 0, 0,\n -np.inf, # z\n 
])\n else:\n low = None\n high = None\n\n self.observation_space = spaces.Box(low=low, high=high)\n\n # For evaluation episodes, set to “test mode” to not display logs.\n def testset(self, test):\n self.testmode = test\n\n # Set learning parameter\n def set_params(self, throttle, gear, dim, max_dist, targ_speed):\n #params: [throttle, gear, dim, max_dist, targ_speed]\n self.throttle = throttle\n self.gear_change = gear\n self.obsdim = dim\n self.maximum_distance = max_dist\n self.default_speed = targ_speed\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n # \"step\" function\n def step(self, u):\n # convert thisAction to the actual torcs actionstr\n client = self.client\n\n this_action = self.agent_to_torcs(u)\n\n # apply actions in TORCS\n action_torcs = client.R.d\n\n # steering control from the agent\n action_torcs[\"steer\"] = this_action[\"steer\"] # in [-1, 1]\n\n # simple automatic throttle control by Snakeoil\n if self.throttle is False:\n target_speed = self.default_speed\n if client.S.d[\"speedX\"] < target_speed - (client.R.d[\"steer\"] * 50):\n if client.R.d[\"accel\"] + 0.1 <= 1:\n client.R.d[\"accel\"] += 0.1\n else:\n if client.R.d[\"accel\"] - 0.1 >= 0:\n client.R.d[\"accel\"] -= 0.1\n\n if client.S.d[\"speedX\"] < 10:\n if (client.S.d[\"speedX\"] + 0.1) != 0:\n client.R.d[\"accel\"] += 1 / (client.S.d[\"speedX\"] + 0.1)\n\n # traction control system\n if (client.S.d[\"wheelSpinVel\"][2] + client.S.d[\"wheelSpinVel\"][3]) - (\n client.S.d[\"wheelSpinVel\"][0] + client.S.d[\"wheelSpinVel\"][1]\n ) > 5:\n action_torcs[\"accel\"] -= 0.2\n else:\n action_torcs[\"accel\"] = this_action[\"accel\"]\n\n # gear control from agent\n if self.gear_change is True:\n action_torcs[\"gear\"] = this_action[\"gear\"]\n else:\n # automatic gear control\n action_torcs[\"gear\"] = 1\n if client.S.d[\"speedX\"] > 50:\n action_torcs[\"gear\"] = 2\n if client.S.d[\"speedX\"] > 80:\n action_torcs[\"gear\"] = 3\n if client.S.d[\"speedX\"] > 110:\n action_torcs[\"gear\"] = 4\n if client.S.d[\"speedX\"] > 140:\n action_torcs[\"gear\"] = 5\n if client.S.d[\"speedX\"] > 170:\n action_torcs[\"gear\"] = 6\n\n # one-step dynamics update #################################\n # apply actions into TORCS\n client.respond_to_server()\n # get the response from TORCS\n client.get_servers_input()\n\n # get the current full-observation from TORCS\n obs = client.S.d\n\n # make an observation from a raw observation vector from TORCS\n self.observation = self.make_observaton(obs)\n \n # calculation of progress\n progress = np.array(obs[\"speedX\"]) * np.cos(obs[\"angle\"])\n\n # Designed Reward Function #######################################\n # This reward function enables agents to learn stable high-speed driving\n # with low Y-axis acceleration.\n # This reward function was designed after trial and error by me.\n\n if (obs[\"curLapTime\"] - self.time) > 0:\n Yac = (obs[\"speedY\"] - self.speedY) / (obs[\"curLapTime\"] - self.time)\n else:\n Yac = 0\n\n self.speedY = obs[\"speedY\"]\n self.time = obs[\"curLapTime\"]\n self.Yaclist.append(Yac)\n\n self.poshis.append(obs[\"trackPos\"])\n self.anglehis.append(obs[\"angle\"])\n self.sphis.append(obs[\"speedX\"])\n\n # reward for the low Y-axis acceleration\n eta_Yac = 1\n r_Yac = 1 / ((Yac / eta_Yac) ** 2 + 1)\n\n # reward for the small angle : 0 ~ 1\n eta_angle = 0.01\n r_angle = 1 / ((obs[\"angle\"] / eta_angle) ** 2 + 1)\n\n # reward for the small position from center : 0 ~ 1\n eta_pos = 0.01\n r_trackPos = 1 / 
((obs[\"trackPos\"] / eta_pos) ** 2 + 1)\n\n # reward for the high X-axis speed : 0 ~ 1\n maxspeed = 100\n if obs[\"speedX\"] >= 0:\n r_speed = min(obs[\"speedX\"] / maxspeed, 1)\n else:\n r_speed = 0\n\n # reward function: -1 ~ 1\n reward = 0.2 * r_angle + 0.2 * r_trackPos + 0.3 * r_speed + 0.3 * r_Yac\n\n Yac_threshold = 3.530394 # 0.1G\n if np.abs(Yac) > Yac_threshold:\n reward = -min(np.abs(Yac) / 250, 1)\n\n # Termination judgement #########################\n track = np.array(obs[\"track\"])\n # episode terminates when the car is out of track\n if track.min() < 0:\n reward = -10\n client.R.d[\"meta\"] = True\n\n # episode terminates if the progress of agent is little\n if self.terminal_judge_start < self.time_step:\n if progress < self.termination_limit_progress:\n reward = -10\n client.R.d[\"meta\"] = True\n\n # episode terminates if the agent runs backward\n if np.cos(obs[\"angle\"]) < 0 or obs[\"distRaced\"] < 0:\n reward = -10\n client.R.d[\"meta\"] = True\n\n # episode terminates when the agent reaches the maximum distance\n if obs[\"distRaced\"] >= self.maximum_distance:\n reward = 10\n client.R.d[\"meta\"] = True\n\n if client.R.d[\"meta\"] is True: # send a reset signal\n poshis = np.array(self.poshis)\n anglehis = np.array(self.anglehis)\n sphis = np.array(self.sphis)\n Yachis = np.array(self.Yaclist)\n \n # For training episodes, display information about the vehicle in the finished driving\n if self.testmode == False:\n print(\"---------------------------------------------------------\")\n print(\"---> raced: \", obs[\"distRaced\"], \" m <---\")\n print(\"--- maxYac: \", np.max(Yachis), \" km/h/s ---\")\n print(\"--- minYac: \", np.min(Yachis), \" km/h/s ---\")\n if abs(np.max(Yachis)) >= abs(np.min(Yachis)):\n absmaxYac = abs(np.max(Yachis))\n else:\n absmaxYac = abs(np.min(Yachis))\n print(\"--- absmaxYac: \", absmaxYac, \" km/h/s ---\")\n print(\"--- meanYac: \", np.mean(Yachis), \" km/h/s +- \", np.std(Yachis), \"---\")\n print(\"--- medianYac: \", np.median(Yachis), \" km/h/s ---\")\n print(\"--- trackPos_mean: \", np.mean(poshis), \" +- \", np.std(poshis), \" ---\")\n print(\"--- angle_mean : \", np.mean(anglehis), \" rad +- \", np.std(anglehis), \" ---\")\n print(\"--- speedX_mean: \", np.mean(sphis), \" km/h +- \", np.std(sphis), \" ---\")\n print(\"---------------------------------------------------------\")\n \n self.initial_run = False\n client.respond_to_server()\n\n self.time_step += 1\n\n return self.get_obs(), reward, client.R.d[\"meta\"], {}\n\n def reset(self, relaunch=False):\n\n self.time_step = 0\n \n # If not true, send a reset signal to TORCS when the reset function is called\n if self.initial_reset is not True:\n self.client.R.d[\"meta\"] = True\n self.client.respond_to_server()\n\n ## TENTATIVE. 
Restarting TORCS for every episode will cause the memory leak bug!\n if relaunch is True:\n self.reset_torcs()\n\n # Modify here if you use multiple tracks in the environment\n # Open new UDP in vtorcs\n self.client = snakeoil3.Client(p=3101, vision=False) \n\n self.client.MAX_STEPS = np.inf\n\n client = self.client\n \n # get the initial input from TORCS\n client.get_servers_input()\n\n # get the current full observation from TORCS\n obs = client.S.d\n self.observation = self.make_observaton(obs)\n\n # reset variables and lists\n self.speedY = obs[\"speedY\"]\n self.time = obs[\"curLapTime\"]\n\n self.Yaclist = []\n self.poshis = []\n self.anglehis = []\n self.sphis = []\n\n self.initial_reset = False\n return self.get_obs()\n\n def close(self):\n os.system(\"pkill torcs\")\n\n def render(self, mode=\"human\"):\n # TORCS has a monitor of driving, so this method omitted.\n pass\n\n ####################################### making observation ############################################\n\n def get_obs(self):\n return self.observation\n\n def reset_torcs(self):\n os.system(\"pkill torcs\")\n time.sleep(0.5)\n\n if self.obsdim == 79:\n os.system(\"torcs &\")\n elif self.obsdim == 2:\n os.system(\"torcs -nofuel -nodamage -nolaptime &\")\n else:\n os.system(\"torcs -nofuel -nodamage -nolaptime &\")\n\n time.sleep(0.5)\n os.system(\"sh ./gym_torcs_kai/autostart.sh\")\n time.sleep(0.5)\n\n def agent_to_torcs(self, u):\n torcs_action = {\"steer\": u[0]}\n\n if self.throttle is True: # throttle action is enabled\n torcs_action.update({\"accel\": u[1]})\n\n if self.gear_change is True: # gear change action is enabled\n torcs_action.update({\"gear\": u[2]})\n\n return torcs_action\n\n def make_observaton(self, raw_obs):\n if self.obsdim == 79:\n obs1 = np.array(\n [\n raw_obs[\"angle\"],\n raw_obs[\"curLapTime\"],\n raw_obs[\"damage\"],\n raw_obs[\"distFromStart\"],\n raw_obs[\"distRaced\"],\n ]\n )\n focus = raw_obs[\"focus\"]\n obs2 = np.array([raw_obs[\"fuel\"], raw_obs[\"gear\"], raw_obs[\"lastLapTime\"]])\n opponents = raw_obs[\"opponents\"]\n obs3 = np.array(\n [\n raw_obs[\"racePos\"],\n raw_obs[\"rpm\"],\n raw_obs[\"speedX\"],\n raw_obs[\"speedY\"],\n raw_obs[\"speedZ\"],\n ]\n )\n track = raw_obs[\"track\"]\n trackPos = np.array([raw_obs[\"trackPos\"]])\n wheelSpinVel = raw_obs[\"wheelSpinVel\"]\n z = np.array(raw_obs[\"z\"])\n observ = np.hstack(\n [obs1, focus, obs2, opponents, obs3, track, trackPos, wheelSpinVel, z]\n )\n return observ\n\n elif self.obsdim == 2:\n return np.array([raw_obs[\"angle\"], raw_obs[\"trackPos\"]])\n\n elif self.obsdim == 31:\n\n obs1 = np.array(\n [\n raw_obs[\"angle\"],\n raw_obs[\"gear\"],\n raw_obs[\"rpm\"],\n raw_obs[\"speedX\"],\n raw_obs[\"speedY\"],\n raw_obs[\"speedZ\"],\n ]\n )\n\n trackPos = np.array([raw_obs[\"trackPos\"]])\n z = np.array(raw_obs[\"z\"])\n\n observ = np.hstack(\n [obs1, raw_obs[\"track\"], trackPos, raw_obs[\"wheelSpinVel\"], z]\n )\n\n return observ\n\n else:\n return None\n" ]
[ [ "numpy.abs", "numpy.cos", "numpy.median", "numpy.hstack", "numpy.max", "numpy.min", "numpy.array", "numpy.std", "numpy.mean" ] ]
samgregoost/self_supervised_large
[ "9c0c33cf374a1d5112519939012a64bca98c5f8d" ]
[ "mnist128.py" ]
[ "from __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport TensorflowUtils as utils\nimport read_MITSceneParsingDataParis as scene_parsing\nimport datetime\nimport BatchDatsetReader as dataset\nfrom six.moves import xrange\n\nFLAGS = tf.flags.FLAGS\ntf.flags.DEFINE_integer(\"batch_size\", \"50\", \"batch size for training\")\ntf.flags.DEFINE_string(\"logs_dir\", \"/scratch1/ram095/nips20/logs_mnist128/\", \"path to logs directory\")\ntf.flags.DEFINE_string(\"data_dir\", \"/scratch1/ram095/nips20/paris_street\", \"path to dataset\")\ntf.flags.DEFINE_float(\"learning_rate\", \"1e-4\", \"Learning rate for Adam Optimizer\")\ntf.flags.DEFINE_string(\"model_dir\", \"Model_zoo/\", \"Path to vgg model mat\")\ntf.flags.DEFINE_bool('debug', \"False\", \"Debug mode: True/ False\")\ntf.flags.DEFINE_string('mode', \"train\", \"Mode train/ test/ visualize\")\n\nMODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'\n\nMAX_ITERATION = int(1e5 + 1)\nNUM_OF_CLASSESS = 3\nIMAGE_SIZE = 128\n\n\ndef vgg_net(weights, image):\n layers = (\n 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',\n\n 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',\n\n 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',\n 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',\n\n 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',\n 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',\n\n 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',\n 'relu5_3', 'conv5_4', 'relu5_4'\n )\n\n net = {}\n current = image\n for i, name in enumerate(layers):\n kind = name[:4]\n if kind == 'conv':\n kernels, bias = weights[i][0][0][0][0]\n # matconvnet: weights are [width, height, in_channels, out_channels]\n # tensorflow: weights are [height, width, in_channels, out_channels]\n kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias)\n elif kind == 'relu':\n current = tf.nn.relu(current, name=name)\n if FLAGS.debug:\n utils.add_activation_summary(current)\n elif kind == 'pool':\n current = utils.avg_pool_2x2(current)\n net[name] = current\n\n return net\n\n'''\ndef decoder(image):\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\n\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n\n weights = np.squeeze(model_data['layers'])\n\n processed_image = utils.process_image(image, mean_pixel)\n with tf.variable_scope(\"decoder\"):\n image_net = vgg_net(weights, processed_image)\n conv_final_layer = image_net[\"conv5_3\"]\n\n pool5 = utils.max_pool_2x2(conv_final_layer)\n \n return pool5\n\n\n'''\n \n\n\ndef inference(image, keep_prob,z):\n \"\"\"\n Semantic segmentation network definition\n :param image: input image. 
Should have values in range 0-255\n :param keep_prob:\n :return:\n \"\"\"\n print(\"setting up vgg initialized conv layers ...\")\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\n\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n\n weights = np.squeeze(model_data['layers'])\n\n processed_image = utils.process_image(image, mean_pixel)\n\n with tf.variable_scope(\"inference\"):\n image_net = vgg_net(weights, processed_image)\n conv_final_layer = image_net[\"conv5_3\"]\n\n pool5 = utils.max_pool_2x2(conv_final_layer)\n\n W6 = utils.weight_variable([7, 7, 512, 4096], name=\"W6\")\n b6 = utils.bias_variable([4096], name=\"b6\")\n conv6 = utils.conv2d_basic(pool5, W6, b6)\n relu6 = tf.nn.relu(conv6, name=\"relu6\")\n if FLAGS.debug:\n utils.add_activation_summary(relu6)\n relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)\n\n W7 = utils.weight_variable([1, 1, 4096, 4096], name=\"W7\")\n b7 = utils.bias_variable([4096], name=\"b7\")\n conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)\n relu7 = tf.nn.relu(conv7, name=\"relu7\")\n if FLAGS.debug:\n utils.add_activation_summary(relu7)\n relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)\n\n W8 = utils.weight_variable([1, 1, 4096, 150], name=\"W8\")\n b8 = utils.bias_variable([150], name=\"b8\")\n\t\n # W_h = utils.weight_variable([1, 7, 7, 4], name=\"Wh\")\n conv8 = tf.reshape(utils.conv2d_basic(relu_dropout7, W8, b8),[-1,4*4*150])\n fc1 = tf.reshape(tf.layers.dense(conv8,4*4*150,activation = tf.nn.relu),[-1,4,4,150])\n \n \n\n concat1 = tf.concat([fc1, z],axis = 3)\n # annotation_pred1 = tf.argmax(conv8, dimension=3, name=\"prediction1\")\n print(\"###########################################################\")\n print(fc1)\n # now to upscale to actual image size\n deconv_shape1 = image_net[\"pool4\"].get_shape()\n W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, 278], name=\"W_t1\")\n b_t1 = utils.bias_variable([deconv_shape1[3].value], name=\"b_t1\")\n conv_t1 = utils.conv2d_transpose_strided(concat1, W_t1, b_t1, output_shape=tf.shape(image_net[\"pool4\"]))\n fuse_1 = tf.add(conv_t1, image_net[\"pool4\"], name=\"fuse_1\")\n\n deconv_shape2 = image_net[\"pool3\"].get_shape()\n W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name=\"W_t2\")\n b_t2 = utils.bias_variable([deconv_shape2[3].value], name=\"b_t2\")\n conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net[\"pool3\"]))\n fuse_2 = tf.add(conv_t2, image_net[\"pool3\"], name=\"fuse_2\")\n\n shape = tf.shape(image)\n deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 3])\n W_t3 = utils.weight_variable([16, 16, 3, deconv_shape2[3].value], name=\"W_t3\")\n b_t3 = utils.bias_variable([3], name=\"b_t3\")\n conv_t3 = tf.nn.relu(utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8))\n\n annotation_pred = tf.argmax(conv_t3, dimension=3, name=\"prediction\")\n\n return tf.expand_dims(annotation_pred, dim=3), conv_t3\n\n\ndef train(loss_val, var_list):\n optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n grads = optimizer.compute_gradients(loss_val, var_list=var_list)\n if FLAGS.debug:\n # print(len(var_list))\n for grad, var in grads:\n utils.add_gradient_summary(grad, var)\n return optimizer.apply_gradients(grads)\n\ndef train_z(loss,Z):\n return tf.gradients(ys = loss, xs = Z)\n\n\ndef main(argv=None):\n keep_probability = tf.placeholder(tf.float32, name=\"keep_probabilty\")\n image = 
tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name=\"input_image\")\n annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name=\"annotation\")\n z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name=\"z\")\n\n # pred_annotation, logits = inference(image, keep_probability,z)\n # tf.summary.image(\"input_image\", image, max_outputs=2)\n # tf.summary.image(\"ground_truth\", tf.cast(annotation, tf.uint8), max_outputs=2)\n # tf.summary.image(\"pred_annotation\", tf.cast(pred_annotation, tf.uint8), max_outputs=2)\n# loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,\n # labels=tf.squeeze(annotation, squeeze_dims=[3]),\n # name=\"entropy\")))\n \n \n mask_ = tf.ones([FLAGS.batch_size,64,64,3])\n mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]])\n\n mask2__ = tf.ones([FLAGS.batch_size,78,78,3])\n mask2_ = tf.pad(mask2__, [[0,0],[25,25],[25,25],[0,0]])\n mask2 = mask2_ - mask\n\n pred_annotation, logits = inference((1-mask)*image + mask*255, keep_probability,z)\n\n tf.summary.image(\"input_image\", image, max_outputs=2)\n tf.summary.image(\"ground_truth\", tf.cast(annotation, tf.uint8), max_outputs=2)\n tf.summary.image(\"pred_annotation\", tf.cast(pred_annotation, tf.uint8), max_outputs=2)\n\n # loss0 = tf.reduce_mean(tf.abs(z))\n loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)),[1,2,3])))\n # loss2 = tf.reduce_mean(tf.square((image - logits)*mask2))\n # loss = loss1 + loss2 + loss0\n # loss = tf.reduce_mean(tf.squared_difference(logits ,annotation ))\n loss_summary = tf.summary.scalar(\"entropy\", loss)\n \n grads = train_z(loss,z) \n\n trainable_var = tf.trainable_variables()\n if FLAGS.debug:\n for var in trainable_var:\n utils.add_to_regularization_and_summary(var)\n train_op = train(loss, trainable_var)\n\n print(\"Setting up summary op...\")\n summary_op = tf.summary.merge_all()\n\n print(\"Setting up image reader...\")\n train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir)\n print(len(train_records))\n print(len(valid_records))\n\n print(\"Setting up dataset reader\")\n image_options = {'resize': True, 'resize_size': IMAGE_SIZE}\n if FLAGS.mode == 'train':\n train_dataset_reader = dataset.BatchDatset(train_records, image_options)\n validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)\n\n sess = tf.Session()\n\n print(\"Setting up Saver...\")\n saver = tf.train.Saver()\n\n # create two summary writers to show training loss and validation loss in the same graph\n # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir\n train_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)\n validation_writer = tf.summary.FileWriter(FLAGS.logs_dir + '/validation')\n\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"Model restored...\")\n\n if FLAGS.mode == \"train\":\n for itr in xrange(MAX_ITERATION):\n \n train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)\n z_ = np.random.uniform(low=-1.0, high=1.0, size=(FLAGS.batch_size,4,4,128))\n # print(train_images)\n feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85, z: z_}\n #train_images[:,50:100,50:100,:] =0\n v = 0\n \n for p in range(10):\n z_ol = np.copy(z_)\n # print(\"666666666666666666666666666666666666666\")\n z_loss, summ = 
sess.run([loss,loss_summary], feed_dict=feed_dict)\n print(\"Step: %d, z_step: %d, Train_loss:%g\" % (itr,p,z_loss))\n# print(z_) \n g = sess.run([grads],feed_dict=feed_dict)\n v_prev = np.copy(v)\n # print(g[0][0].shape)\n v = 0.001*v - 0.1*g[0][0]\n z_ += 0.001 * v_prev + (1+0.001)*v\n # z_ = np.clip(z_, -1.0, 1.0)\n # print(v.shape)\n # print(z_.shape)\n feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85, z: z_}\n sess.run(train_op, feed_dict=feed_dict)\n\n if itr % 10 == 0:\n train_loss, summary_str = sess.run([loss, loss_summary], feed_dict=feed_dict)\n print(\"Step: %d, Train_loss:%g\" % (itr, train_loss))\n \n train_writer.add_summary(summary_str, itr)\n \n\n if itr % 500 == 0:\n valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)\n \n valid_loss, summary_sva = sess.run([loss, loss_summary], feed_dict={image: valid_images, annotation: valid_annotations,\n keep_probability: 1.0, z: z_})\n print(\"%s ---> Validation_loss: %g\" % (datetime.datetime.now(), valid_loss))\n\n # add validation loss to TensorBoard\n validation_writer.add_summary(summary_sva, itr)\n saver.save(sess, FLAGS.logs_dir + \"model_z_center.ckpt\", 500)\n\n elif FLAGS.mode == \"visualize\":\n valid_images, valid_annotations = validation_dataset_reader.get_random_batch(50)\n z_ = np.random.uniform(low=-1.0, high=1.0, size=(FLAGS.batch_size,4,4,128))\n feed_dict = {image: valid_images, annotation: valid_annotations, keep_probability: 0.85, z: z_}\n v= 0\n for p in range(50):\n z_ol = np.copy(z_)\n # print(\"666666666666666666666666666666666666666\")\n z_loss, summ = sess.run([loss,loss_summary], feed_dict=feed_dict)\n print(\"z_step: %d, Train_loss:%g\" % (p,z_loss))\n# print(z_)\n g = sess.run([grads],feed_dict=feed_dict)\n v_prev = np.copy(v)\n # print(g[0][0].shape)\n v = 0.001*v - 0.1*g[0][0]\n z_ += 0.001 * v_prev + (1+0.001)*v\n # z_ = np.clip(z_, -1.0, 1.0)\n \n pred = sess.run(logits, feed_dict={image: valid_images, annotation: valid_annotations,z:z_,\n keep_probability: 1.0})\n \n\n \n valid_images_masked = (1-sess.run(mask))*valid_images\n predicted_patch = sess.run(mask) * pred\n pred = valid_images_masked + predicted_patch \n # valid_annotations = np.squeeze(valid_annotations, axis=3)\n # pred = np.squeeze(pred, axis=3)\n print(valid_images.shape)\n print(valid_annotations.shape)\n print(pred.shape)\n\n for itr in range(FLAGS.batch_size):\n utils.save_image(valid_images_masked[itr].astype(np.uint8), FLAGS.logs_dir, name=\"inp_\" + str(5+itr))\n utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name=\"gt_\" + str(5+itr))\n utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name=\"predz_\" + str(5+itr))\n print(\"Saved image: %d\" % itr)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.flags.DEFINE_float", "tensorflow.summary.image", "tensorflow.ones", "tensorflow.variable_scope", "numpy.copy", "tensorflow.concat", "tensorflow.summary.FileWriter", "tensorflow.nn.dropout", "numpy.transpose", "tensorflow.global_variables_initializer", "numpy.mean", "numpy.random.uniform", "tensorflow.flags.DEFINE_integer", "tensorflow.shape", "tensorflow.stack", "tensorflow.app.run", "tensorflow.expand_dims", "tensorflow.cast", "tensorflow.gradients", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.flags.DEFINE_string", "tensorflow.layers.dense", "tensorflow.pad", "tensorflow.placeholder", "numpy.squeeze", "tensorflow.summary.merge_all", "tensorflow.train.get_checkpoint_state", "tensorflow.flags.DEFINE_bool", "tensorflow.train.AdamOptimizer", "tensorflow.add", "tensorflow.trainable_variables", "tensorflow.square", "tensorflow.argmax", "tensorflow.nn.relu" ] ]
sizhky/carla-dataset-runner
[ "a670d981d29de78460cd90b1d4949ee4b71d0ade" ]
[ "HDF5Saver.py" ]
[ "import h5py\nimport numpy as np\n\n\nclass HDF5Saver:\n def __init__(self, sensor_width, sensor_height, file_path_to_save=\"data/carla_dataset.hdf5\"):\n self.sensor_width = sensor_width\n self.sensor_height = sensor_height\n\n self.file = h5py.File(file_path_to_save, \"w\")\n # Creating groups to store each type of data\n self.rgb_group = self.file.create_group(\"rgb\")\n self.depth_group = self.file.create_group(\"depth\")\n self.ego_speed_group = self.file.create_group(\"ego_speed\")\n self.bounding_box_group = self.file.create_group(\"bounding_box\")\n self.bb_vehicles_group = self.bounding_box_group.create_group(\"vehicles\")\n self.bb_walkers_group = self.bounding_box_group.create_group(\"walkers\")\n self.timestamp_group = self.file.create_group(\"timestamps\")\n\n # Storing metadata\n self.file.attrs['sensor_width'] = sensor_width\n self.file.attrs['sensor_height'] = sensor_height\n self.file.attrs['simulation_synchronization_type'] = \"syncd\"\n self.rgb_group.attrs['channels'] = 'R,G,B'\n self.ego_speed_group.attrs['x,y,z_velocity'] = 'in m/s'\n self.bounding_box_group.attrs['data_description'] = 'Each 4 entries in the same row present one individual actor in the scene.'\n self.bounding_box_group.attrs['bbox_format'] = '[xmin, ymin, xmax, ymax] (top left coords; right bottom coords)' \\\n 'the vector has been flattened; therefore the data must' \\\n 'be captured in blocks of 4 elements'\n self.timestamp_group.attrs['time_format'] = \"current time in MILISSECONDS since the unix epoch \" \\\n \"(time.time()*1000 in python3)\"\n\n def record_data(self, rgb_array, depth_array, bounding_box, ego_speed, timestamp):\n timestamp = str(timestamp)\n self.rgb_group.create_dataset(timestamp, data=rgb_array)\n self.depth_group.create_dataset(timestamp, data=depth_array)\n self.ego_speed_group.create_dataset(timestamp, data=ego_speed)\n self.bb_vehicles_group.create_dataset(timestamp, data=bounding_box[0])\n self.bb_walkers_group.create_dataset(timestamp, data=bounding_box[1])\n\n def record_all_timestamps(self, timestamps_list):\n self.timestamp_group.create_dataset(\"timestamps\", data=np.array(timestamps_list))\n\n def close_HDF5(self):\n self.file.close()\n" ]
[ [ "numpy.array" ] ]
VinhLoiIT/ignite
[ "3b2b9655ea9f80ce49b8a9f1c2d72f80e2a95f56" ]
[ "ignite/contrib/engines/common.py" ]
[ "from functools import partial\nimport warnings\nimport numbers\n\nfrom collections.abc import Sequence, Mapping\n\nimport torch\nimport torch.distributed as dist\n\nfrom ignite.engine import Engine, Events\nfrom ignite.metrics import RunningAverage\nfrom ignite.handlers import TerminateOnNan, ModelCheckpoint, EarlyStopping\nfrom ignite.contrib.metrics import GpuInfo\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.contrib.handlers import VisdomLogger\nfrom ignite.contrib.handlers import TensorboardLogger, global_step_from_engine\nimport ignite.contrib.handlers.tensorboard_logger as tb_logger_module\nimport ignite.contrib.handlers.visdom_logger as visdom_logger_module\nfrom ignite.contrib.handlers import MLflowLogger\nimport ignite.contrib.handlers.mlflow_logger as mlflow_logger_module\nfrom ignite.contrib.handlers import PolyaxonLogger\nimport ignite.contrib.handlers.polyaxon_logger as polyaxon_logger_module\n\n\ndef setup_common_training_handlers(\n trainer,\n train_sampler=None,\n to_save=None,\n save_every_iters=1000,\n output_path=None,\n lr_scheduler=None,\n with_gpu_stats=False,\n output_names=None,\n with_pbars=True,\n with_pbar_on_iters=True,\n log_every_iters=100,\n device=\"cuda\",\n):\n \"\"\"Helper method to setup trainer with common handlers (it also supports distributed configuration):\n - :class:`~ignite.handlers.TerminateOnNan`\n - handler to setup learning rate scheduling\n - :class:`~ignite.handlers.ModelCheckpoint`\n - :class:`~ignite.metrics.RunningAverage` on `update_function` output\n - Two progress bars on epochs and optionally on iterations\n\n Args:\n trainer (Engine): trainer engine. Output of trainer's `update_function` should be a dictionary\n or sequence or a single tensor.\n train_sampler (torch.utils.data.DistributedSampler, optional): Optional distributed sampler used to call\n `set_epoch` method on epoch started event.\n to_save (dict, optional): dictionary with objects to save in the checkpoint. This is used with\n :class:`~ignite.handlers.ModelCheckpoint`.\n save_every_iters (int, optional): saving interval. By default, `to_save` objects are stored\n each 1000 iterations.\n output_path (str, optional): output path to indicate where `to_save` objects are stored.\n lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): learning rate scheduler\n as native torch LRScheduler or ignite's parameter scheduler.\n with_gpu_stats (bool, optional): if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the\n trainer. 
This requires `pynvml` package to be installed.\n output_names (list/tuple): list of names associated with `update_function` output dictionary.\n with_pbars (bool, optional): if True, two progress bars on epochs and optionally on iterations are attached\n with_pbar_on_iters (bool, optional): if True, a progress bar on iterations is attached to the trainer.\n log_every_iters (int, optional): logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for\n epoch-wise progress bar.\n device (str of torch.device, optional): Optional device specification in case of distributed computation usage.\n \"\"\"\n kwargs = dict(\n to_save=to_save,\n save_every_iters=save_every_iters,\n output_path=output_path,\n lr_scheduler=lr_scheduler,\n with_gpu_stats=with_gpu_stats,\n output_names=output_names,\n with_pbars=with_pbars,\n with_pbar_on_iters=with_pbar_on_iters,\n log_every_iters=log_every_iters,\n device=device,\n )\n if dist.is_available() and dist.is_initialized():\n _setup_common_distrib_training_handlers(trainer, train_sampler=train_sampler, **kwargs)\n else:\n if train_sampler is not None:\n warnings.warn(\n \"Argument train_sampler distributed sampler used to call `set_epoch` method on epoch \"\n \"started event, but no distributed setting detected\",\n UserWarning,\n )\n _setup_common_training_handlers(trainer, **kwargs)\n\n\nsetup_common_distrib_training_handlers = setup_common_training_handlers\n\n\ndef _setup_common_training_handlers(\n trainer,\n to_save=None,\n save_every_iters=1000,\n output_path=None,\n lr_scheduler=None,\n with_gpu_stats=True,\n output_names=None,\n with_pbars=True,\n with_pbar_on_iters=True,\n log_every_iters=100,\n device=\"cuda\",\n):\n trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())\n\n if lr_scheduler is not None:\n if isinstance(lr_scheduler, torch.optim.lr_scheduler._LRScheduler):\n trainer.add_event_handler(Events.ITERATION_COMPLETED, lambda engine: lr_scheduler.step())\n else:\n trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)\n\n trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)\n\n if to_save is not None:\n if output_path is None:\n raise ValueError(\"If to_save argument is provided then output_path argument should be also defined\")\n checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix=\"training\")\n trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)\n\n if with_gpu_stats:\n GpuInfo().attach(trainer, name=\"gpu\", event_name=Events.ITERATION_COMPLETED(every=log_every_iters))\n\n if output_names is not None:\n\n def output_transform(x, index, name):\n if isinstance(x, Mapping):\n return x[name]\n elif isinstance(x, Sequence):\n return x[index]\n elif isinstance(x, (torch.Tensor, numbers.Number)):\n return x\n else:\n raise ValueError(\n \"Unhandled type of update_function's output. 
\"\n \"It should either mapping or sequence, but given {}\".format(type(x))\n )\n\n for i, n in enumerate(output_names):\n RunningAverage(\n output_transform=partial(output_transform, index=i, name=n), epoch_bound=False, device=device\n ).attach(trainer, n)\n\n if with_pbars:\n if with_pbar_on_iters:\n ProgressBar(persist=False).attach(\n trainer, metric_names=\"all\", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)\n )\n\n ProgressBar(persist=True, bar_format=\"\").attach(\n trainer, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED\n )\n\n\ndef _setup_common_distrib_training_handlers(\n trainer,\n train_sampler=None,\n to_save=None,\n save_every_iters=1000,\n output_path=None,\n lr_scheduler=None,\n with_gpu_stats=True,\n output_names=None,\n with_pbars=True,\n with_pbar_on_iters=True,\n log_every_iters=100,\n device=\"cuda\",\n):\n if not (dist.is_available() and dist.is_initialized()):\n raise RuntimeError(\"Distributed setting is not initialized, please call `dist.init_process_group` before.\")\n\n _setup_common_training_handlers(\n trainer,\n to_save=None,\n lr_scheduler=lr_scheduler,\n with_gpu_stats=with_gpu_stats,\n output_names=output_names,\n with_pbars=(dist.get_rank() == 0) and with_pbars,\n with_pbar_on_iters=with_pbar_on_iters,\n log_every_iters=log_every_iters,\n device=device,\n )\n\n if train_sampler is not None:\n if not callable(getattr(train_sampler, \"set_epoch\", None)):\n raise TypeError(\"Train sampler should have `set_epoch` method\")\n\n @trainer.on(Events.EPOCH_STARTED)\n def distrib_set_epoch(engine):\n train_sampler.set_epoch(engine.state.epoch - 1)\n\n if dist.get_rank() == 0:\n if to_save is not None:\n if output_path is None:\n raise ValueError(\"If to_save argument is provided then output_path argument should be also defined\")\n checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix=\"training\")\n trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)\n\n\ndef empty_cuda_cache(_):\n torch.cuda.empty_cache()\n import gc\n\n gc.collect()\n\n\ndef setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, log_every_iters):\n if optimizers is not None:\n from torch.optim.optimizer import Optimizer\n\n if not isinstance(optimizers, (Optimizer, Mapping)):\n raise TypeError(\"Argument optimizers should be either a single optimizer or a dictionary or optimizers\")\n\n if evaluators is not None:\n if not isinstance(evaluators, (Engine, Mapping)):\n raise TypeError(\"Argument optimizers should be either a single optimizer or a dictionary or optimizers\")\n\n if log_every_iters is None:\n log_every_iters = 1\n\n logger.attach(\n trainer,\n log_handler=logger_module.OutputHandler(tag=\"training\", metric_names=\"all\"),\n event_name=Events.ITERATION_COMPLETED(every=log_every_iters),\n )\n\n if optimizers is not None:\n # Log optimizer parameters\n if isinstance(optimizers, Optimizer):\n optimizers = {None: optimizers}\n\n for k, optimizer in optimizers.items():\n logger.attach(\n trainer,\n log_handler=logger_module.OptimizerParamsHandler(optimizer, param_name=\"lr\", tag=k),\n event_name=Events.ITERATION_STARTED(every=log_every_iters),\n )\n\n if evaluators is not None:\n # Log evaluation metrics\n if isinstance(evaluators, Engine):\n evaluators = {\"validation\": evaluators}\n\n for k, evaluator in evaluators.items():\n gst = global_step_from_engine(trainer)\n logger.attach(\n evaluator,\n log_handler=logger_module.OutputHandler(tag=k, 
metric_names=\"all\", global_step_transform=gst),\n event_name=Events.COMPLETED,\n )\n\n\ndef setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log_every_iters=100):\n \"\"\"Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:\n - Training metrics, e.g. running average loss values\n - Learning rate(s)\n - Evaluation metrics\n\n Args:\n output_path (str): logging directory path\n trainer (Engine): trainer engine\n optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of\n torch optimizers. If a dictionary, keys are used as tags arguments for logging.\n evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,\n keys are used as tags arguments for logging.\n log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,\n value can be set to 1 or None.\n\n Returns:\n TensorboardLogger\n \"\"\"\n tb_logger = TensorboardLogger(log_dir=output_path)\n setup_any_logging(tb_logger, tb_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters)\n return tb_logger\n\n\ndef setup_visdom_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):\n \"\"\"Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:\n - Training metrics, e.g. running average loss values\n - Learning rate(s)\n - Evaluation metrics\n\n Args:\n trainer (Engine): trainer engine\n optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of\n torch optimizers. If a dictionary, keys are used as tags arguments for logging.\n evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,\n keys are used as tags arguments for logging.\n log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,\n value can be set to 1 or None.\n **kwargs: kwargs to pass into VisdomLogger\n\n Returns:\n VisdomLogger\n \"\"\"\n vis_logger = VisdomLogger(**kwargs)\n setup_any_logging(\n vis_logger, visdom_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters\n )\n return vis_logger\n\n\ndef setup_mlflow_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):\n \"\"\"Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:\n - Training metrics, e.g. running average loss values\n - Learning rate(s)\n - Evaluation metrics\n\n Args:\n trainer (Engine): trainer engine\n optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of\n torch optimizers. If a dictionary, keys are used as tags arguments for logging.\n evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,\n keys are used as tags arguments for logging.\n log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,\n value can be set to 1 or None.\n\n Returns:\n MLflowLogger\n \"\"\"\n mlflow_logger = MLflowLogger()\n setup_any_logging(\n mlflow_logger, mlflow_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters\n )\n return mlflow_logger\n\n\ndef setup_plx_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):\n \"\"\"Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:\n - Training metrics, e.g. 
running average loss values\n - Learning rate(s)\n - Evaluation metrics\n\n Args:\n trainer (Engine): trainer engine\n optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of\n torch optimizers. If a dictionary, keys are used as tags arguments for logging.\n evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,\n keys are used as tags arguments for logging.\n log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,\n value can be set to 1 or None.\n\n Returns:\n PolyaxonLogger\n \"\"\"\n plx_logger = PolyaxonLogger()\n setup_any_logging(\n plx_logger, polyaxon_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters\n )\n return plx_logger\n\n\ndef get_default_score_fn(metric_name):\n def wrapper(engine):\n score = engine.state.metrics[metric_name]\n return score\n\n return wrapper\n\n\ndef save_best_model_by_val_score(output_path, evaluator, model, metric_name, n_saved=3, trainer=None, tag=\"val\"):\n \"\"\"Method adds a handler to `evaluator` to save best models based on the score (named by `metric_name`)\n provided by `evaluator`.\n\n Args:\n output_path (str): output path to indicate where to save best models\n evaluator (Engine): evaluation engine used to provide the score\n model (nn.Module): model to store\n metric_name (str): metric name to use for score evaluation. This metric should be present in\n `evaluator.state.metrics`.\n n_saved (int, optional): number of best models to store\n trainer (Engine, optional): trainer engine to fetch the epoch when saving the best model.\n tag (str, optional): score name prefix: `{tag}_{metric_name}`. By default, tag is \"val\".\n\n \"\"\"\n global_step_transform = None\n if trainer is not None:\n global_step_transform = global_step_from_engine(trainer)\n\n best_model_handler = ModelCheckpoint(\n dirname=output_path,\n filename_prefix=\"best\",\n n_saved=n_saved,\n global_step_transform=global_step_transform,\n score_name=\"{}_{}\".format(tag, metric_name.lower()),\n score_function=get_default_score_fn(metric_name),\n )\n evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {\"model\": model,})\n\n\ndef add_early_stopping_by_val_score(patience, evaluator, trainer, metric_name):\n \"\"\"Method setups early stopping handler based on the score (named by `metric_name`) provided by `evaluator`.\n\n Args:\n patience (int): number of events to wait if no improvement and then stop the training.\n evaluator (Engine): evaluation engine used to provide the score\n trainer (Engine): trainer engine to stop the run if no improvement.\n metric_name (str): metric name to use for score evaluation. This metric should be present in\n `evaluator.state.metrics`.\n\n \"\"\"\n es_handler = EarlyStopping(patience=patience, score_function=get_default_score_fn(metric_name), trainer=trainer)\n evaluator.add_event_handler(Events.COMPLETED, es_handler)\n" ]
[ [ "torch.cuda.empty_cache", "torch.distributed.is_available", "torch.distributed.get_rank", "torch.distributed.is_initialized" ] ]
tarkantemizoz/Cost-Sensitive-Learning
[ "083f8dfd2950b7e3874df34bf61c2ca1e4a91fbb" ]
[ "Models/opt_torch.py" ]
[ "# coding: utf-8\n# Copyright 2020 Tarkan Temizoz\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom sklearn.utils import shuffle\nfrom Models.linearnet import LinearNet\n\nclass Optimization:\n \"\"\" A helper class to train, test and diagnose Cost-sensitive Logistic Regression\n \n Attributes:\n model: CSLR model.\n optimizer: Optimizer of the network.\n train_return: List of train returns.\n val_return: List of validation returns.\n validation: Whether there is validation data.\n batch_size: Batch-size of the network.\n n_epochs: Total number of epochs.\n n_steps: Number of epochs to evaluate the results\n \"\"\"\n \n def __init__(self, model, optimizer, config):\n \"\"\"Initialises CLSR.\n \n Args:\n model: CSLR model.\n optimizer: Optimizer of the network.\n config: Configuration of the network.\n \"\"\"\n \n self.model = model\n self.optimizer = optimizer\n self.train_return = []\n self.val_return = []\n self.validation = False\n self.batch_size = config.get(\"batch_size\",32)\n self.n_epochs = config.get(\"n_epochs\", 1000)\n self.n_steps = config.get(\"n_steps\", self.n_epochs)\n \n @staticmethod\n def batch(iterable, n):\n \"\"\"Creates batches.\"\"\"\n \n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)] \n \n def train(self, x_train, r_train, x_val=None, r_val=None):\n \"\"\"Applies simple feed-forward network to an input.\n \n Args:\n x_train: train features\n r_train: train returns\n x_val: validation features\n r_val: validation returns\n \"\"\"\n \n if x_val is not None or r_val is not None:\n self.validation = True\n start_time = time.time()\n \n for epoch in range(self.n_epochs):\n x_shuff, r_shuff = shuffle(x_train, r_train)\n self.model.train() \n for j in self.batch(range(0, len(x_shuff)),self.batch_size):\n if len(j) < 2:\n break\n x_batch = x_shuff[j]\n r_batch = r_shuff[j]\n self.optimizer.zero_grad()\n outputs, _, _ = self.model(x_batch)\n loss = -torch.mul(outputs, r_batch).sum() \n loss.backward()\n self.optimizer.step()\n\n returns_train, _, _ = self.evaluate(x_train, r_train)\n self.train_return.append(returns_train)\n if self.validation is True:\n returns_val, _, _ = self.evaluate(x_val, r_val)\n self.val_return.append(returns_val)\n \n if ((epoch+1) % self.n_steps == 0):\n elapsed = time.time() - start_time\n print(\n (\"Epoch %d Train Return: %.3f.\") % (epoch + 1, self.train_return[-1]),\n ((\" Validation Return: %.3f. 
Elapsed time: %.3fs.\")\n % (self.val_return[-1], elapsed)\n if self.validation is True else \n \" Elapsed time: %.3fs.\"\n % elapsed) \n )\n start_time = time.time() \n \n def evaluate(self, x_test, r_test):\n \"\"\"Evaluates simple feed-forward network to an input.\n \n Args:\n x_test: features of the evaluated data\n r_test: returns of the evaluated data\n \n Returns:\n Triple of Tensors for: (Total returns, decision variables, probabilities)\n \"\"\"\n \n with torch.no_grad():\n outputs, probs, _ = self.model(x_test)\n returns = torch.mul(outputs, r_test).sum()\n \n return returns, outputs, probs \n \n def plot_return(self):\n \"\"\"Draws a plot, Trains Returns vs Test Returns\"\"\"\n \n plt.plot(self.train_return, label=\"Train Return\")\n plt.plot(self.val_return, label=\"Test Return\")\n plt.legend()\n plt.title(\"Returns\")\n\n" ]
[ [ "matplotlib.pyplot.legend", "torch.no_grad", "sklearn.utils.shuffle", "matplotlib.pyplot.title", "torch.mul", "matplotlib.pyplot.plot" ] ]
nong-fu/grabcut
[ "19a43eed7597ffae456349e4f0568da2f8f1f25c" ]
[ "grabcut.py" ]
[ "# coding=utf-8\n\nimport sys\nfrom pathlib import Path\nimport webbrowser\n\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\nfrom PyQt5.QtCore import QDir, Qt, pyqtSlot, pyqtSignal\nfrom PyQt5.QtGui import QImage, QPixmap, QColor\nfrom PyQt5.QtWidgets import (\n QApplication, QMainWindow, QWidget,\n QMessageBox, QFileDialog, QLabel, QSpinBox, QPushButton,\n QActionGroup, QAction, QSizePolicy, QHBoxLayout,\n)\n\nfrom ui_grabcut import Ui_MainWindow\n\n\nclass Canvas(QLabel):\n \"\"\"Canvas for drawing mask layer on Image.\n \"\"\"\n\n mousePressed = pyqtSignal()\n mouseMoved = pyqtSignal(int, int, int, int)\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n # self.setStyleSheet(\"border: 1px solid red;\")\n self.last_x, self.last_y = None, None\n\n def mousePressEvent(self, e):\n self.mousePressed.emit()\n\n def mouseMoveEvent(self, e):\n x, y = e.x(), e.y()\n\n if self.last_x is None:\n self.last_x, self.last_y = x, y\n return\n\n self.mouseMoved.emit(self.last_x, self.last_y, x, y)\n self.last_x, self.last_y = x, y\n\n def mouseReleaseEvent(self, e):\n self.last_x, self.last_y = None, None\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n\n # orign image data\n self.img = None\n # mask layer for grabcut\n self.mask = None\n # history masks for undo\n self.masks = []\n # grabcut algorithm param iterCount\n self.iterCount = 5\n\n # canvas image cache\n self.imgWithMask = None\n # mask mode to color, don't use dict, too slow!\n self.mode2color = (\n # cv2.GC_BGD == 0\n np.array([0, 0, 255], dtype=np.uint8),\n # cv2.GC_FGD == 1\n np.array([0, 255, 0], dtype=np.uint8),\n # cv2.GC_PR_BGD == 2\n np.array([0, 0, 120], dtype=np.uint8),\n # cv2.GC_PR_FGD == 3\n np.array([0, 120, 0], dtype=np.uint8),\n )\n # NONE mean none of (BGD/FGD/PR_BGD/PR_FGD)\n self.GC_NONE = 255\n # mask layer alpha\n self.alpha = 0.3\n\n self.imgPath = Path.cwd()\n self.penSize = 40\n\n # init ui order matter\n self.initUI()\n\n def grabCut(self, iterCount):\n if self.img is None:\n self.showMessage(\"No image\")\n return\n\n # avoid grabCut crash\n if not np.any((self.mask == cv2.GC_FGD) | (self.mask == cv2.GC_PR_FGD)):\n self.showMessage(\"no GC_FGD or GC_PR_FGD\")\n return\n\n # before grabcut, save mask to stack\n self.pushMask()\n bgdModel = np.zeros((1, 65), np.float64)\n fgdModel = np.zeros((1, 65), np.float64)\n _ = cv2.grabCut(self.img, self.mask, None, bgdModel,\n fgdModel, iterCount, cv2.GC_INIT_WITH_MASK)\n self.drawPartialImgWithMask(self.masks[-1], self.mask)\n\n # display result\n self.ui.displayResultAction.setChecked(True)\n self.repaint()\n\n def drawingMask(self, x1, y1, x2, y2):\n \"\"\"drawing an small partial of the mask layer,\n which is a small line segment.\n \"\"\"\n if self.img is None:\n return\n # when hidden mask or display result, don't draw mask\n if self.ui.hiddenMaskAction.isChecked() or \\\n self.ui.displayResultAction.isChecked():\n return\n\n if self.ui.prFgdAction.isChecked():\n mode = cv2.GC_PR_FGD\n elif self.ui.prBgdAction.isChecked():\n mode = cv2.GC_PR_BGD\n elif self.ui.fgdAction.isChecked():\n mode = cv2.GC_FGD\n else: # bgdAction\n mode = cv2.GC_BGD\n\n cv2.line(self.mask, (x1, y1), (x2, y2), mode, self.penSize)\n partialMask = np.zeros(self.mask.shape, np.uint8)\n # GC_BGD is 0, can't use 0 as default\n partialMask.fill(self.GC_NONE)\n cv2.line(partialMask, (x1, y1), (x2, y2), mode, self.penSize)\n\n indices = np.where(partialMask != self.GC_NONE)\n if 
indices[0].size == 0:\n # nothing new in partialMask\n return\n self.imgWithMask[indices] = (1 - self.alpha)*self.img[indices] + \\\n self.alpha*self.mode2color[mode]\n\n self.repaint()\n\n def pushMask(self):\n \"\"\"push a mask to history list masks for undo.\n \"\"\"\n # if mask hasn't changed\n if len(self.masks) > 0 and np.array_equal(self.masks[-1], self.mask):\n return\n\n self.masks.append(self.mask.copy())\n\n def drawPartialImgWithMask(self, curMask, newMask):\n \"\"\"draw partial imgWithMask.\n\n mask changed from curMask to newMask, only draw the changed part.\n \"\"\"\n # redraw partial imgWithMask\n indices = np.where(curMask != newMask)\n if indices[0].size == 0:\n # two masks are equal\n return\n self.imgWithMask[indices] = (1-self.alpha)*self.img[indices] + \\\n self.alpha*np.array([self.mode2color[m] for m in newMask[indices]])\n\n def getResult(self):\n \"\"\"use mask cuf off forground area as final result.\n \"\"\"\n result_mask = np.where((self.mask == 2) | (\n self.mask == 0), 0, 1).astype('uint8')\n return self.img*result_mask[:, :, np.newaxis]\n\n @pyqtSlot(name=\"on_displayResultAction_triggered\")\n @pyqtSlot(name=\"on_hiddenMaskAction_triggered\")\n def repaint(self):\n \"\"\"repaint cavans.\n \"\"\"\n if self.img is None:\n self.showMessage(\"No image\")\n return\n\n if self.ui.displayResultAction.isChecked():\n img = self.getResult()\n elif self.ui.hiddenMaskAction.isChecked():\n img = self.img\n else:\n img = self.imgWithMask\n\n # convert opencv image to qt image\n height, width, _ = img.shape\n bytesOfLine = 3*width\n image = QImage(img.tobytes(), width, height,\n bytesOfLine, QImage.Format_RGB888).rgbSwapped()\n self.canvas.setPixmap(QPixmap.fromImage(image))\n\n def initUI(self):\n # merge designer ui\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n # right box on toolbar\n rightBox = QWidget(self.ui.toolBar)\n boxLayout = QHBoxLayout()\n\n # grabcut iterCount spinbox\n boxLayout.addWidget(QLabel(\"iterCount\"))\n self.iterCountSpinBox = QSpinBox(self)\n self.iterCountSpinBox.setRange(1, 100)\n self.iterCountSpinBox.setValue(5)\n boxLayout.addWidget(self.iterCountSpinBox)\n\n boxLayout.addStretch(1)\n\n # pen size spinbox\n boxLayout.addWidget(QLabel(\"pen\"))\n self.penSizeSpinBox = QSpinBox(self)\n self.penSizeSpinBox.setRange(1, 500)\n self.penSizeSpinBox.setSingleStep(5)\n self.penSizeSpinBox.setValue(40)\n boxLayout.addWidget(self.penSizeSpinBox)\n\n rightBox.setLayout(boxLayout)\n self.ui.toolBar.addWidget(rightBox)\n\n self.canvas = Canvas(self)\n self.ui.scrollArea.setWidget(self.canvas)\n # canvas align center in scroll area\n self.ui.scrollArea.setAlignment(Qt.AlignCenter)\n # fixed canvas that make it easier to select mask layer\n self.canvas.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n\n # 4 types of mask layer flags\n actionGroup = QActionGroup(self)\n actionGroup.addAction(self.ui.fgdAction)\n actionGroup.addAction(self.ui.bgdAction)\n actionGroup.addAction(self.ui.prFgdAction)\n actionGroup.addAction(self.ui.prBgdAction)\n\n # handle events\n self.ui.exitAction.triggered.connect(self.close)\n self.penSizeSpinBox.valueChanged.connect(self.setPenSize)\n self.iterCountSpinBox.valueChanged.connect(self.setIterCount)\n\n self.ui.opencvAction.triggered.connect(lambda: webbrowser.open(\n 'https://opencv-python-tutroals.readthedocs.io/en/'\n 'latest/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html'\n ))\n\n self.canvas.mousePressed.connect(self.pushMask)\n self.canvas.mouseMoved.connect(self.drawingMask)\n\n 
self.resetUiToDrawMaskMode()\n\n def resetUiToDrawMaskMode(self):\n \"\"\"reset ui to draw mask mode.\n \"\"\"\n self.ui.prFgdAction.setChecked(True)\n self.ui.displayResultAction.setChecked(False)\n self.ui.hiddenMaskAction.setChecked(False)\n\n def setPenSize(self, v):\n self.penSize = v\n\n def setIterCount(self, v):\n self.iterCount = v\n\n def showMessage(self, msg):\n self.ui.statusbar.showMessage(msg)\n\n @pyqtSlot(name=\"on_openAction_triggered\")\n def openImage(self):\n fileName, _ = QFileDialog.getOpenFileName(\n self, \"Open File\", str(self.imgPath))\n if not fileName:\n return\n\n imgFile = Path(fileName)\n self.imgPath = imgFile.parent\n\n # cv2.imread can't read image that path contain chinese characters,\n # so this is a workaround.\n # self.img = cv2.imread(fileName)\n data = np.fromfile(fileName, dtype=np.uint8)\n self.img = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)\n # discarding alpha channel\n self.img = self.img[:,:,:3]\n self.reset()\n\n @pyqtSlot(name=\"on_saveAction_triggered\")\n def saveResult(self):\n if self.img is None:\n self.showMessage(\"no result to save\")\n return\n\n fileName, _ = QFileDialog.getSaveFileName(\n self, \"Save File\", str(self.imgPath))\n if not fileName:\n return\n\n imgFile = Path(fileName)\n self.imgPath = imgFile.parent\n # default save as png\n if not imgFile.suffix:\n imgFile = imgFile.with_suffix('.png')\n result = self.getResult()\n # cv2.imwrite can't write image that path contain chinese characters.\n im = Image.fromarray(result)\n im.save(imgFile.as_posix())\n\n @pyqtSlot(name=\"on_exportMaskAction_triggered\")\n def exportMask(self):\n if self.mask is None or not self.mask.any():\n self.showMessage(\"no mask\")\n return\n fileName, _ = QFileDialog.getSaveFileName(\n self, \"Save Mask\", str(self.imgPath))\n if not fileName:\n return\n\n imgFile = Path(fileName)\n self.imgPath = imgFile.parent\n # default save as png\n if not imgFile.suffix:\n imgFile = imgFile.with_suffix('.png')\n im = Image.fromarray(self.mask)\n im.save(imgFile.as_posix())\n\n @pyqtSlot(name=\"on_undoAction_triggered\")\n def undo(self):\n if len(self.masks) == 0:\n self.showMessage(\"undo stack is empty\")\n return\n\n prevMask = self.masks.pop()\n self.drawPartialImgWithMask(self.mask, prevMask)\n self.mask = prevMask\n\n # after undo, uncheck display result and hidden mask\n self.resetUiToDrawMaskMode()\n self.repaint()\n\n @pyqtSlot(name=\"on_resetAction_triggered\")\n def reset(self):\n if self.img is None:\n self.showMessage(\"No image\")\n return\n\n self.mask = np.zeros(self.img.shape[:2], np.uint8)\n self.mask.fill(cv2.GC_PR_BGD)\n self.masks = []\n\n # re-create imgWidthMask\n self.imgWithMask = np.zeros(self.img.shape, np.uint8)\n self.imgWithMask[...] = (1-self.alpha)*self.img + \\\n self.alpha*self.mode2color[cv2.GC_PR_BGD]\n\n self.resetUiToDrawMaskMode()\n self.repaint()\n\n @pyqtSlot(name=\"on_grabCutAction_triggered\")\n def runGrabCut(self):\n self.grabCut(self.iterCount)\n\n @pyqtSlot(name=\"on_singleStepAction_triggered\")\n def runGrabCutSingleStep(self):\n self.grabCut(1)\n\n def closeEvent(self, evt):\n # maybe popup a dialog to ask user accept or ignore\n evt.accept()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n app.exec_()\n" ]
[ [ "numpy.fromfile", "numpy.zeros", "numpy.any", "numpy.array_equal", "numpy.array", "numpy.where" ] ]
dongkcs/mindspore
[ "cd7df6dbf463ff3128e9181e9d0c779cecb81320" ]
[ "model_zoo/official/cv/alexnet/export.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n##############export checkpoint file into air and onnx models#################\npython export.py\n\"\"\"\nimport argparse\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import Tensor\nfrom mindspore import context\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net, export\n\nfrom src.config import alexnet_cifar10_cfg, alexnet_imagenet_cfg\nfrom src.alexnet import AlexNet\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Classification')\n parser.add_argument('--dataset_name', type=str, default='cifar10', choices=['imagenet', 'cifar10'],\n help='please choose dataset: imagenet or cifar10.')\n parser.add_argument('--device_target', type=str, default=\"Ascend\",\n choices=['Ascend', 'GPU'],\n help='device where the code will be implemented (default: Ascend)')\n parser.add_argument('--ckpt_path', type=str, default=\"./ckpt\", help='if is test, must provide\\\n path where the trained ckpt file')\n args_opt = parser.parse_args()\n context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)\n\n if args_opt.dataset_name == 'cifar10':\n cfg = alexnet_cifar10_cfg\n elif args_opt.dataset_name == 'imagenet':\n cfg = alexnet_imagenet_cfg\n else:\n raise ValueError(\"dataset is not support.\")\n\n net = AlexNet(num_classes=cfg.num_classes)\n\n param_dict = load_checkpoint(args_opt.ckpt_path)\n load_param_into_net(net, param_dict)\n\n input_arr = Tensor(np.random.uniform(0.0, 1.0, size=[1, 3, cfg.image_height, cfg.image_width]), ms.float32)\n export(net, input_arr, file_name=cfg.air_name, file_format=\"AIR\")\n" ]
[ [ "numpy.random.uniform" ] ]
Gretacyh/images-downloader-fliter
[ "ffe070026a45c741013a575a6a985d97e28d6fd7" ]
[ "img_filter/img_advanced_filter.py" ]
[ "import os\nimport re\nimport cv2\nimport umap\nimport torch\nimport torch.nn as nn\nfrom torchvision import models\nimport torch.nn.functional as F\nimport numpy as np\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\n\n\ndef global_std_pool2d(x):\n \"\"\"2D global standard variation pooling\"\"\"\n return torch.std(x.view(x.size()[0], x.size()[1], -1, 1), dim=2, keepdim=True)\n\n\nclass ResNet50(torch.nn.Module):\n \"\"\"Modified ResNet50 for feature extraction\"\"\"\n\n def __init__(self):\n super(ResNet50, self).__init__()\n self.features = nn.Sequential(*list(models.resnet50(pretrained=True).children())[:-2])\n # 冻结模型\n for p in self.features.parameters():\n p.requires_grad = False\n # 检测是否有GPU\n if torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n self.to(self.device)\n\n def forward(self, x):\n # features@: 7->res5c\n for ii, model in enumerate(self.features):\n x = model(x)\n if ii == 7:\n features_mean = nn.functional.adaptive_avg_pool2d(x, 1)\n features_std = global_std_pool2d(x)\n return features_mean, features_std\n\n\n# 提取图像特征\ndef get_img_feature(model, img_path):\n img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR)\n img = torch.from_numpy(img)\n img = img.to(model.device).float()\n img = torch.unsqueeze(img, 0) # batch size 1\n img = img.permute(0, 3, 1, 2)\n feature = model(img)\n return feature\n\n\n# UMAP降维\ndef do_umap(features, channel=2, random_state=None):\n model = umap.UMAP(n_components=channel, random_state=random_state)\n return model.fit_transform(features), model\n\n\n# t-SNE降维\ndef do_tsne(data, random_state=0):\n tsne = TSNE(n_components=2, init='pca', random_state=random_state)\n return tsne.fit_transform(data), tsne\n\n\n# 绘制数据图像\ndef plot_embedding(data, type=None, text=None, title=\"\", colors=None):\n if type is None:\n type = np.zeros_like(data[:, 0])\n x_min, x_max = np.min(data, 0), np.max(data, 0)\n data = (data - x_min) / (x_max - x_min)\n\n fig = plt.figure()\n ax = plt.subplot(111)\n for i in range(data.shape[0]):\n if text is not None:\n plt.text(data[i, 0], data[i, 1], str(text[i]),\n color=plt.cm.Set1((text[i] + 1) / 10.) if colors is None else colors[type[i]],\n fontdict={'weight': 'bold', 'size': 8})\n else:\n plt.scatter(data[i, 0], data[i, 1], s=3,\n color=plt.cm.Set1((text[i] + 1) / 10.) if colors is None else colors[type[i]])\n plt.xticks([])\n plt.yticks([])\n plt.title(title)\n plt.show()\n return fig\n\n\nif __name__ == '__main__':\n root_dir = \"/root/yanghan/cat\"\n file_suffix = \"jpeg|jpg|png\"\n remove_dir = root_dir + \"/remove\"\n if not os.path.exists(remove_dir):\n os.makedirs(remove_dir)\n\n # 模型初始化\n model = ResNet50()\n # 提取图像特征\n feature_list = []\n name_list = []\n for img_name in os.listdir(root_dir)[:]:\n # 对处理文件的类型进行过滤\n if re.search(file_suffix, img_name) is None:\n continue\n img_path = root_dir + \"/\" + img_name\n mean, std = get_img_feature(model, img_path)\n mean = mean.to('cpu').numpy().reshape(-1)\n std = std.to('cpu').numpy().reshape(-1)\n feature = np.concatenate((mean, std), 0)\n print(feature.shape)\n feature_list.append(feature)\n name_list.append(img_name[7:10])\n\n # 特征绘图\n feature_list = np.array(feature_list)\n name_list = np.array(name_list)\n feature_list_tsne, _ = do_tsne(feature_list)\n plot_embedding(feature_list_tsne, title=\"tsne\", text=name_list)\n feature_list_umap, _ = do_umap(feature_list)\n plot_embedding(feature_list_umap, title=\"umap\", text=name_list)\n cv2.waitKey()\n" ]
[ [ "torch.unsqueeze", "numpy.zeros_like", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure", "torch.device", "matplotlib.pyplot.title", "sklearn.manifold.TSNE", "matplotlib.pyplot.subplot", "torch.from_numpy", "matplotlib.pyplot.show", "torch.cuda.is_available", "numpy.min", "numpy.max", "numpy.array", "torch.nn.functional.adaptive_avg_pool2d", "numpy.concatenate", "matplotlib.pyplot.cm.Set1", "matplotlib.pyplot.yticks" ] ]
ZZR0/ISSTA21-JIT-DP
[ "c2916f7c3b1d235ff2858220886d6a7da068bf8a" ]
[ "DeepJIT/train.py" ]
[ "from model import DeepJIT\nimport torch \nfrom tqdm import tqdm\nfrom utils import mini_batches_train, save\nimport torch.nn as nn\nimport os, datetime\n\ndef train_model(data, params):\n data_pad_msg, data_pad_code, data_labels, dict_msg, dict_code = data\n \n # set up parameters\n params.cuda = (not params.no_cuda) and torch.cuda.is_available()\n del params.no_cuda\n params.filter_sizes = [int(k) for k in params.filter_sizes.split(',')]\n\n # params.save_dir = os.path.join(params.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n\n params.vocab_msg, params.vocab_code = len(dict_msg), len(dict_code) \n\n if len(data_labels.shape) == 1:\n params.class_num = 1\n else:\n params.class_num = data_labels.shape[1]\n params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # create and train the defect model\n model = DeepJIT(args=params)\n if torch.cuda.is_available():\n model = model.cuda()\n optimizer = torch.optim.Adam(model.parameters(), lr=params.l2_reg_lambda)\n\n criterion = nn.BCELoss()\n for epoch in range(1, params.num_epochs + 1):\n total_loss = 0\n # building batches for training model\n batches = mini_batches_train(X_msg=data_pad_msg, X_code=data_pad_code, Y=data_labels, mini_batch_size=params.batch_size)\n for i, (batch) in enumerate(tqdm(batches)):\n pad_msg, pad_code, labels = batch\n if torch.cuda.is_available(): \n pad_msg, pad_code, labels = torch.tensor(pad_msg).cuda(), torch.tensor(\n pad_code).cuda(), torch.cuda.FloatTensor(labels)\n else: \n pad_msg, pad_code, labels = torch.tensor(pad_msg).long(), torch.tensor(pad_code).long(), torch.tensor(\n labels).float()\n\n optimizer.zero_grad()\n predict = model.forward(pad_msg, pad_code)\n loss = criterion(predict, labels)\n total_loss += loss\n loss.backward()\n optimizer.step()\n\n print('Epoch %i / %i -- Total loss: %f' % (epoch, params.num_epochs, total_loss)) \n save(model, params.save_dir, 'epoch', epoch)\n" ]
[ [ "torch.nn.BCELoss", "torch.cuda.is_available", "torch.tensor", "torch.cuda.FloatTensor" ] ]
uve/tensorflow
[ "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "e08079463bf43e5963acc41da1f57e95603f8080", "1fa4cd6a566c8745f455fc3d2273208f21f88ced" ]
[ "tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py", "tensorflow/python/kernel_tests/signal/fft_ops_test.py", "tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py", "tensorflow/python/ops/distributions/distributions.py", "tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py", "tensorflow/python/keras/initializers_test.py", "tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py", "tensorflow/python/keras/engine/training_v2_utils.py", "tensorflow/python/training/tensorboard_logging.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nr\"\"\"Multivariate autoregressive model (vector autoregression).\r\n\r\nImplements the following model (num_blocks = max(ar_order, ma_order + 1)):\r\n\r\n y(t, 1) = \\sum_{i=1}^{ar_order} ar_coefs[i] * y(t - 1, i)\r\n y(t, i) = y(t - 1, i - 1) + ma_coefs[i - 1] * e(t) for 1 < i < num_blocks\r\n y(t, num_blocks) = y(t - 1, num_blocks - 1) + e(t)\r\n\r\nWhere e(t) are Gaussian with zero mean and learned covariance.\r\n\r\nEach element of ar_coefs and ma_coefs is a [num_features x num_features]\r\nmatrix. Each y(t, i) is a vector of length num_features. Indices in the above\r\nequations are one-based. Initial conditions y(0, i) come from prior state (which\r\nmay either be learned or left as a constant with high prior covariance).\r\n\r\nIf ar_order > ma_order, the observation model is:\r\n y(t, 1) + observation_noise(t)\r\n\r\nIf ma_order >= ar_order, it is (to observe the moving average component):\r\n y(t, 1) + y(t, num_blocks) + observation_noise(t)\r\n\r\nWhere observation_noise(t) are Gaussian with zero mean and learned covariance.\r\n\r\nThis implementation uses a formulation which puts all of the autoregressive\r\ncoefficients in the transition equation for the observed component, which\r\nenables learning using truncated backpropagation. 
Noise is not applied directly\r\nto the observed component (with the exception of standard observation noise),\r\nwhich further aids learning of the autoregressive coefficients when VARMA is in\r\nan ensemble with other models (in which case having an observation noise term is\r\nusually unavoidable).\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.contrib.timeseries.python.timeseries import math_utils\r\nfrom tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.ops import linalg_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import variable_scope\r\n\r\n\r\nclass VARMA(state_space_model.StateSpaceModel):\r\n \"\"\"A VARMA model implementation as a special case of the state space model.\"\"\"\r\n\r\n def __init__(self,\r\n autoregressive_order,\r\n moving_average_order,\r\n configuration=state_space_model.StateSpaceModelConfiguration()):\r\n \"\"\"Construct a VARMA model.\r\n\r\n The size of the latent state for this model is:\r\n num_features * max(autoregressive_order, moving_average_order + 1)\r\n Square matrices of this size are constructed and multiplied.\r\n\r\n Args:\r\n autoregressive_order: The maximum autoregressive lag.\r\n moving_average_order: The maximum moving average lag, after which\r\n transient deviations are expected to return to their long-term mean.\r\n configuration: A StateSpaceModelConfiguration object.\r\n \"\"\"\r\n self.ar_order = autoregressive_order\r\n self.ma_order = moving_average_order\r\n self.state_num_blocks = max(autoregressive_order, moving_average_order + 1)\r\n super(VARMA, self).__init__(configuration=configuration)\r\n self.state_dimension = self.state_num_blocks * self.num_features\r\n\r\n def _define_parameters(self, observation_transition_tradeoff_log=None):\r\n with variable_scope.variable_scope(self._variable_scope):\r\n # TODO(allenl): Evaluate parameter transformations for AR/MA coefficients\r\n # which improve interpretability/stability.\r\n self.ar_coefs = variable_scope.get_variable(\r\n name=\"ar_coefs\",\r\n shape=[self.num_features, self.num_features, self.ar_order],\r\n dtype=self.dtype,\r\n initializer=init_ops.zeros_initializer())\r\n self.ma_coefs = variable_scope.get_variable(\r\n name=\"ma_coefs\",\r\n initializer=array_ops.tile(\r\n linalg_ops.eye(self.num_features, dtype=self.dtype)[None, :, :],\r\n [self.ma_order, 1, 1]),\r\n dtype=self.dtype)\r\n super(VARMA, self)._define_parameters(\r\n observation_transition_tradeoff_log=observation_transition_tradeoff_log)\r\n\r\n def get_state_transition(self):\r\n \"\"\"Construct state transition matrix from VARMA parameters.\r\n\r\n Returns:\r\n the state transition matrix. It has shape\r\n [self.state_dimension, self.state_dimension].\r\n \"\"\"\r\n # Pad any unused AR blocks with zeros. 
The extra state is necessary if\r\n # ma_order >= ar_order.\r\n ar_coefs_padded = array_ops.reshape(\r\n array_ops.pad(self.ar_coefs,\r\n [[0, 0], [0, 0],\r\n [0, self.state_num_blocks - self.ar_order]]),\r\n [self.num_features, self.state_dimension])\r\n shift_matrix = array_ops.pad(\r\n linalg_ops.eye(\r\n (self.state_num_blocks - 1) * self.num_features, dtype=self.dtype),\r\n [[0, 0], [0, self.num_features]])\r\n return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0)\r\n\r\n def get_noise_transform(self):\r\n \"\"\"Construct state noise transform matrix from VARMA parameters.\r\n\r\n Returns:\r\n the state noise transform matrix. It has shape\r\n [self.state_dimension, self.num_features].\r\n \"\"\"\r\n # Noise is broadcast, through the moving average coefficients, to\r\n # un-observed parts of the latent state.\r\n ma_coefs_padded = array_ops.reshape(\r\n array_ops.pad(self.ma_coefs,\r\n [[self.state_num_blocks - 1 - self.ma_order, 0], [0, 0],\r\n [0, 0]]),\r\n [(self.state_num_blocks - 1) * self.num_features, self.num_features],\r\n name=\"noise_transform\")\r\n # Deterministically apply noise to the oldest component.\r\n return array_ops.concat(\r\n [ma_coefs_padded,\r\n linalg_ops.eye(self.num_features, dtype=self.dtype)],\r\n axis=0)\r\n\r\n def get_observation_model(self, times):\r\n \"\"\"Construct observation model matrix from VARMA parameters.\r\n\r\n Args:\r\n times: A [batch size] vector indicating the times observation models are\r\n requested for. Unused.\r\n Returns:\r\n the observation model matrix. It has shape\r\n [self.num_features, self.state_dimension].\r\n \"\"\"\r\n del times # StateSpaceModel will broadcast along the batch dimension\r\n if self.ar_order > self.ma_order or self.state_num_blocks < 2:\r\n return array_ops.pad(\r\n linalg_ops.eye(self.num_features, dtype=self.dtype),\r\n [[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],\r\n name=\"observation_model\")\r\n else:\r\n # Add a second observed component which \"catches\" the accumulated moving\r\n # average errors as they reach the end of the state. If ar_order >\r\n # ma_order, this is unnecessary, since accumulated errors cycle naturally.\r\n return array_ops.concat(\r\n [\r\n array_ops.pad(\r\n linalg_ops.eye(self.num_features, dtype=self.dtype),\r\n [[0, 0], [0,\r\n self.num_features * (self.state_num_blocks - 2)]]),\r\n linalg_ops.eye(self.num_features, dtype=self.dtype)\r\n ],\r\n axis=1,\r\n name=\"observation_model\")\r\n\r\n def get_state_transition_noise_covariance(\r\n self, minimum_initial_variance=1e-5):\r\n # Most state space models use only an explicit observation noise term to\r\n # model deviations from expectations, and so a low initial transition noise\r\n # parameter is helpful there. 
Since deviations from expectations are also\r\n # modeled as transition noise in VARMA, we set its initial value based on a\r\n # slight over-estimate empirical observation noise.\r\n if self._input_statistics is not None:\r\n feature_variance = self._scale_variance(\r\n self._input_statistics.series_start_moments.variance)\r\n initial_transition_noise_scale = math_ops.log(\r\n math_ops.maximum(\r\n math_ops.reduce_mean(feature_variance), minimum_initial_variance))\r\n else:\r\n initial_transition_noise_scale = 0.\r\n state_noise_transform = ops.convert_to_tensor(\r\n self.get_noise_transform(), dtype=self.dtype)\r\n state_noise_dimension = tensor_shape.dimension_value(\r\n state_noise_transform.shape[1])\r\n return math_utils.variable_covariance_matrix(\r\n state_noise_dimension, \"state_transition_noise\",\r\n dtype=self.dtype,\r\n initial_overall_scale_log=initial_transition_noise_scale)\r\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for fft operations.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom six.moves import xrange # pylint: disable=redefined-builtin\r\n\r\nfrom tensorflow.core.protobuf import config_pb2\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_spectral_ops\r\nfrom tensorflow.python.ops import gradient_checker\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import spectral_ops_test_util\r\nfrom tensorflow.python.ops.signal import fft_ops\r\nfrom tensorflow.python.platform import test\r\n\r\nVALID_FFT_RANKS = (1, 2, 3)\r\n\r\n\r\nclass BaseFFTOpsTest(test.TestCase):\r\n\r\n def _compare(self, x, rank, fft_length=None, use_placeholder=False,\r\n rtol=1e-4, atol=1e-4):\r\n self._compareForward(x, rank, fft_length, use_placeholder, rtol, atol)\r\n self._compareBackward(x, rank, fft_length, use_placeholder, rtol, atol)\r\n\r\n def _compareForward(self, x, rank, fft_length=None, use_placeholder=False,\r\n rtol=1e-4, atol=1e-4):\r\n x_np = self._npFFT(x, rank, fft_length)\r\n if use_placeholder:\r\n x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))\r\n x_tf = self._tfFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})\r\n else:\r\n x_tf = self._tfFFT(x, rank, fft_length)\r\n\r\n self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)\r\n\r\n def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False,\r\n rtol=1e-4, atol=1e-4):\r\n x_np = self._npIFFT(x, rank, fft_length)\r\n if use_placeholder:\r\n x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))\r\n x_tf = self._tfIFFT(x_ph, 
rank, fft_length, feed_dict={x_ph: x})\r\n else:\r\n x_tf = self._tfIFFT(x, rank, fft_length)\r\n\r\n self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)\r\n\r\n def _checkMemoryFail(self, x, rank):\r\n config = config_pb2.ConfigProto()\r\n config.gpu_options.per_process_gpu_memory_fraction = 1e-2\r\n with self.cached_session(config=config, force_gpu=True):\r\n self._tfFFT(x, rank, fft_length=None)\r\n\r\n def _checkGradComplex(self, func, x, y, result_is_complex=True,\r\n rtol=1e-2, atol=1e-2):\r\n with self.cached_session(use_gpu=True):\r\n inx = ops.convert_to_tensor(x)\r\n iny = ops.convert_to_tensor(y)\r\n # func is a forward or inverse, real or complex, batched or unbatched FFT\r\n # function with a complex input.\r\n z = func(math_ops.complex(inx, iny))\r\n # loss = sum(|z|^2)\r\n loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))\r\n\r\n ((x_jacob_t, x_jacob_n),\r\n (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(\r\n [inx, iny], [list(x.shape), list(y.shape)],\r\n loss, [1],\r\n x_init_value=[x, y],\r\n delta=1e-2)\r\n\r\n self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)\r\n self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)\r\n\r\n def _checkGradReal(self, func, x, rtol=1e-2, atol=1e-2):\r\n with self.cached_session(use_gpu=True):\r\n inx = ops.convert_to_tensor(x)\r\n # func is a forward RFFT function (batched or unbatched).\r\n z = func(inx)\r\n # loss = sum(|z|^2)\r\n loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))\r\n x_jacob_t, x_jacob_n = test.compute_gradient(\r\n inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)\r\n\r\n self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)\r\n\r\n\r\nclass FFTOpsTest(BaseFFTOpsTest):\r\n\r\n def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):\r\n # fft_length unused for complex FFTs.\r\n with self.cached_session(use_gpu=True) as sess:\r\n return sess.run(self._tfFFTForRank(rank)(x), feed_dict=feed_dict)\r\n\r\n def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):\r\n # fft_length unused for complex FFTs.\r\n with self.cached_session(use_gpu=True) as sess:\r\n return sess.run(self._tfIFFTForRank(rank)(x), feed_dict=feed_dict)\r\n\r\n def _npFFT(self, x, rank, fft_length=None):\r\n if rank == 1:\r\n return np.fft.fft2(x, s=fft_length, axes=(-1,))\r\n elif rank == 2:\r\n return np.fft.fft2(x, s=fft_length, axes=(-2, -1))\r\n elif rank == 3:\r\n return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1))\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _npIFFT(self, x, rank, fft_length=None):\r\n if rank == 1:\r\n return np.fft.ifft2(x, s=fft_length, axes=(-1,))\r\n elif rank == 2:\r\n return np.fft.ifft2(x, s=fft_length, axes=(-2, -1))\r\n elif rank == 3:\r\n return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1))\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _tfFFTForRank(self, rank):\r\n if rank == 1:\r\n return fft_ops.fft\r\n elif rank == 2:\r\n return fft_ops.fft2d\r\n elif rank == 3:\r\n return fft_ops.fft3d\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _tfIFFTForRank(self, rank):\r\n if rank == 1:\r\n return fft_ops.ifft\r\n elif rank == 2:\r\n return fft_ops.ifft2d\r\n elif rank == 3:\r\n return fft_ops.ifft3d\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n @test_util.run_deprecated_v1\r\n def testEmpty(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type in (np.complex64, np.complex128):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 
3):\r\n x = np.zeros((0,) * dims).astype(np_type)\r\n self.assertEqual(x.shape, self._tfFFT(x, rank).shape)\r\n self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)\r\n\r\n @test_util.run_deprecated_v1\r\n def testBasic(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n self._compare(\r\n np.mod(np.arange(np.power(4, dims)), 10).reshape(\r\n (4,) * dims).astype(np_type), rank, rtol=tol, atol=tol)\r\n\r\n def testLargeBatch(self):\r\n if test.is_gpu_available(cuda_only=True):\r\n rank = 1\r\n for dims in xrange(rank, rank + 3):\r\n for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-5)):\r\n self._compare(\r\n np.mod(np.arange(np.power(128, dims)), 10).reshape(\r\n (128,) * dims).astype(np_type), rank, rtol=tol, atol=tol)\r\n\r\n # TODO(yangzihao): Disable before we can figure out a way to\r\n # properly test memory fail for large batch fft.\r\n # def testLargeBatchMemoryFail(self):\r\n # if test.is_gpu_available(cuda_only=True):\r\n # rank = 1\r\n # for dims in xrange(rank, rank + 3):\r\n # self._checkMemoryFail(\r\n # np.mod(np.arange(np.power(128, dims)), 64).reshape(\r\n # (128,) * dims).astype(np.complex64), rank)\r\n\r\n @test_util.run_deprecated_v1\r\n def testBasicPlaceholder(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n self._compare(\r\n np.mod(np.arange(np.power(4, dims)), 10).reshape(\r\n (4,) * dims).astype(np_type),\r\n rank, use_placeholder=True, rtol=tol, atol=tol)\r\n\r\n @test_util.run_deprecated_v1\r\n def testRandom(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 5e-6)):\r\n def gen(shape):\r\n n = np.prod(shape)\r\n re = np.random.uniform(size=n)\r\n im = np.random.uniform(size=n)\r\n return (re + im * 1j).reshape(shape)\r\n\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n self._compare(gen((4,) * dims).astype(np_type), rank,\r\n rtol=tol, atol=tol)\r\n\r\n @test_util.run_deprecated_v1\r\n def testRandom1D(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type in (np.complex64, np.complex128):\r\n has_gpu = test.is_gpu_available(cuda_only=True)\r\n tol = {(np.complex64, True): 1e-4,\r\n (np.complex64, False): 1e-2,\r\n (np.complex128, True): 1e-4,\r\n (np.complex128, False): 1e-2}[(np_type, has_gpu)]\r\n def gen(shape):\r\n n = np.prod(shape)\r\n re = np.random.uniform(size=n)\r\n im = np.random.uniform(size=n)\r\n return (re + im * 1j).reshape(shape)\r\n\r\n # Check a variety of power-of-2 FFT sizes.\r\n for dim in (128, 256, 512, 1024):\r\n self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)\r\n\r\n # Check a variety of non-power-of-2 FFT sizes.\r\n for dim in (127, 255, 511, 1023):\r\n self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)\r\n\r\n @test_util.run_deprecated_v1\r\n def testError(self):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(0, rank):\r\n x = np.zeros((1,) * dims).astype(np.complex64)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Shape must be .*rank {}.*\".format(rank)):\r\n self._tfFFT(x, rank)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Shape must be .*rank {}.*\".format(rank)):\r\n self._tfIFFT(x, rank)\r\n\r\n 
@test_util.run_deprecated_v1\r\n def testGrad_Simple(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.float32, 1e-4), (np.float64, 1e-10)):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 2):\r\n re = np.ones(shape=(4,) * dims, dtype=np_type) / 10.0\r\n im = np.zeros(shape=(4,) * dims, dtype=np_type)\r\n self._checkGradComplex(self._tfFFTForRank(rank), re, im,\r\n rtol=tol, atol=tol)\r\n self._checkGradComplex(self._tfIFFTForRank(rank), re, im,\r\n rtol=tol, atol=tol)\r\n\r\n @test_util.run_deprecated_v1\r\n def testGrad_Random(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for np_type, tol in ((np.float32, 1e-2), (np.float64, 1e-10)):\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 2):\r\n re = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1\r\n im = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1\r\n self._checkGradComplex(self._tfFFTForRank(rank), re, im,\r\n rtol=tol, atol=tol)\r\n self._checkGradComplex(self._tfIFFTForRank(rank), re, im,\r\n rtol=tol, atol=tol)\r\n\r\n\r\nclass RFFTOpsTest(BaseFFTOpsTest):\r\n\r\n def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False):\r\n super(RFFTOpsTest, self)._compareBackward(x, rank, fft_length,\r\n use_placeholder)\r\n\r\n def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):\r\n with self.cached_session(use_gpu=True) as sess:\r\n return sess.run(\r\n self._tfFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)\r\n\r\n def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):\r\n with self.cached_session(use_gpu=True) as sess:\r\n return sess.run(\r\n self._tfIFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)\r\n\r\n def _npFFT(self, x, rank, fft_length=None):\r\n if rank == 1:\r\n return np.fft.rfft2(x, s=fft_length, axes=(-1,))\r\n elif rank == 2:\r\n return np.fft.rfft2(x, s=fft_length, axes=(-2, -1))\r\n elif rank == 3:\r\n return np.fft.rfft2(x, s=fft_length, axes=(-3, -2, -1))\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _npIFFT(self, x, rank, fft_length=None):\r\n if rank == 1:\r\n return np.fft.irfft2(x, s=fft_length, axes=(-1,))\r\n elif rank == 2:\r\n return np.fft.irfft2(x, s=fft_length, axes=(-2, -1))\r\n elif rank == 3:\r\n return np.fft.irfft2(x, s=fft_length, axes=(-3, -2, -1))\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _tfFFTForRank(self, rank):\r\n if rank == 1:\r\n return fft_ops.rfft\r\n elif rank == 2:\r\n return fft_ops.rfft2d\r\n elif rank == 3:\r\n return fft_ops.rfft3d\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n def _tfIFFTForRank(self, rank):\r\n if rank == 1:\r\n return fft_ops.irfft\r\n elif rank == 2:\r\n return fft_ops.irfft2d\r\n elif rank == 3:\r\n return fft_ops.irfft3d\r\n else:\r\n raise ValueError(\"invalid rank\")\r\n\r\n @test_util.run_deprecated_v1\r\n def testEmpty(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n x = np.zeros((0,) * dims).astype(np.float32)\r\n self.assertEqual(x.shape, self._tfFFT(x, rank).shape)\r\n x = np.zeros((0,) * dims).astype(np.complex64)\r\n self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)\r\n\r\n @test_util.run_deprecated_v1\r\n def testBasic(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n for size in (5, 6):\r\n inner_dim = size // 2 + 1\r\n r2c = np.mod(np.arange(np.power(size, dims)), 
10).reshape(\r\n (size,) * dims)\r\n self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)\r\n c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),\r\n 10).reshape((size,) * (dims - 1) + (inner_dim,))\r\n self._compareBackward(\r\n c2r.astype(np.complex64), rank, (size,) * rank)\r\n\r\n def testLargeBatch(self):\r\n if test.is_gpu_available(cuda_only=True):\r\n rank = 1\r\n for dims in xrange(rank, rank + 3):\r\n for size in (64, 128):\r\n inner_dim = size // 2 + 1\r\n r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(\r\n (size,) * dims)\r\n self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)\r\n c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),\r\n 10).reshape((size,) * (dims - 1) + (inner_dim,))\r\n self._compareBackward(c2r.astype(np.complex64), rank, (size,) * rank)\r\n\r\n @test_util.run_deprecated_v1\r\n def testBasicPlaceholder(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n for size in (5, 6):\r\n inner_dim = size // 2 + 1\r\n r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(\r\n (size,) * dims)\r\n self._compareForward(\r\n r2c.astype(np.float32),\r\n rank, (size,) * rank,\r\n use_placeholder=True)\r\n c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),\r\n 10).reshape((size,) * (dims - 1) + (inner_dim,))\r\n self._compareBackward(\r\n c2r.astype(np.complex64),\r\n rank, (size,) * rank,\r\n use_placeholder=True)\r\n\r\n @test_util.run_deprecated_v1\r\n def testFftLength(self):\r\n if test.is_gpu_available(cuda_only=True):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n for size in (5, 6):\r\n inner_dim = size // 2 + 1\r\n r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(\r\n (size,) * dims)\r\n c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),\r\n 10).reshape((size,) * (dims - 1) + (inner_dim,))\r\n # Test truncation (FFT size < dimensions).\r\n fft_length = (size - 2,) * rank\r\n self._compareForward(r2c.astype(np.float32), rank, fft_length)\r\n self._compareBackward(c2r.astype(np.complex64), rank, fft_length)\r\n # Confirm it works with unknown shapes as well.\r\n self._compareForward(\r\n r2c.astype(np.float32),\r\n rank,\r\n fft_length,\r\n use_placeholder=True)\r\n self._compareBackward(\r\n c2r.astype(np.complex64),\r\n rank,\r\n fft_length,\r\n use_placeholder=True)\r\n # Test padding (FFT size > dimensions).\r\n fft_length = (size + 2,) * rank\r\n self._compareForward(r2c.astype(np.float32), rank, fft_length)\r\n self._compareBackward(c2r.astype(np.complex64), rank, fft_length)\r\n # Confirm it works with unknown shapes as well.\r\n self._compareForward(\r\n r2c.astype(np.float32),\r\n rank,\r\n fft_length,\r\n use_placeholder=True)\r\n self._compareBackward(\r\n c2r.astype(np.complex64),\r\n rank,\r\n fft_length,\r\n use_placeholder=True)\r\n\r\n @test_util.run_deprecated_v1\r\n def testRandom(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n def gen_real(shape):\r\n n = np.prod(shape)\r\n re = np.random.uniform(size=n)\r\n ret = re.reshape(shape)\r\n return ret\r\n\r\n def gen_complex(shape):\r\n n = np.prod(shape)\r\n re = np.random.uniform(size=n)\r\n im = np.random.uniform(size=n)\r\n ret = (re + im * 1j).reshape(shape)\r\n return ret\r\n\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(rank, rank + 3):\r\n for size in (5, 6):\r\n inner_dim = size // 2 + 1\r\n 
self._compareForward(gen_real((size,) * dims), rank, (size,) * rank)\r\n complex_dims = (size,) * (dims - 1) + (inner_dim,)\r\n self._compareBackward(\r\n gen_complex(complex_dims), rank, (size,) * rank)\r\n\r\n @test_util.run_deprecated_v1\r\n def testError(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n for dims in xrange(0, rank):\r\n x = np.zeros((1,) * dims).astype(np.complex64)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Shape .* must have rank at least {}\".format(rank)):\r\n self._tfFFT(x, rank)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Shape .* must have rank at least {}\".format(rank)):\r\n self._tfIFFT(x, rank)\r\n for dims in xrange(rank, rank + 2):\r\n x = np.zeros((1,) * rank)\r\n\r\n # Test non-rank-1 fft_length produces an error.\r\n fft_length = np.zeros((1, 1)).astype(np.int32)\r\n with self.assertRaisesWithPredicateMatch(ValueError,\r\n \"Shape .* must have rank 1\"):\r\n self._tfFFT(x, rank, fft_length)\r\n with self.assertRaisesWithPredicateMatch(ValueError,\r\n \"Shape .* must have rank 1\"):\r\n self._tfIFFT(x, rank, fft_length)\r\n\r\n # Test wrong fft_length length.\r\n fft_length = np.zeros((rank + 1,)).astype(np.int32)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Dimension must be .*but is {}.*\".format(rank + 1)):\r\n self._tfFFT(x, rank, fft_length)\r\n with self.assertRaisesWithPredicateMatch(\r\n ValueError, \"Dimension must be .*but is {}.*\".format(rank + 1)):\r\n self._tfIFFT(x, rank, fft_length)\r\n\r\n # Test that calling the kernel directly without padding to fft_length\r\n # produces an error.\r\n rffts_for_rank = {\r\n 1: [gen_spectral_ops.rfft, gen_spectral_ops.irfft],\r\n 2: [gen_spectral_ops.rfft2d, gen_spectral_ops.irfft2d],\r\n 3: [gen_spectral_ops.rfft3d, gen_spectral_ops.irfft3d]\r\n }\r\n rfft_fn, irfft_fn = rffts_for_rank[rank]\r\n with self.assertRaisesWithPredicateMatch(\r\n errors.InvalidArgumentError,\r\n \"Input dimension .* must have length of at least 6 but got: 5\"):\r\n x = np.zeros((5,) * rank).astype(np.float32)\r\n fft_length = [6] * rank\r\n with self.cached_session():\r\n self.evaluate(rfft_fn(x, fft_length))\r\n\r\n with self.assertRaisesWithPredicateMatch(\r\n errors.InvalidArgumentError,\r\n \"Input dimension .* must have length of at least .* but got: 3\"):\r\n x = np.zeros((3,) * rank).astype(np.complex64)\r\n fft_length = [6] * rank\r\n with self.cached_session():\r\n self.evaluate(irfft_fn(x, fft_length))\r\n\r\n @test_util.run_deprecated_v1\r\n def testGrad_Simple(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n # rfft3d/irfft3d do not have gradients yet.\r\n if rank == 3:\r\n continue\r\n for dims in xrange(rank, rank + 2):\r\n for size in (5, 6):\r\n re = np.ones(shape=(size,) * dims, dtype=np.float32)\r\n im = -np.ones(shape=(size,) * dims, dtype=np.float32)\r\n self._checkGradReal(self._tfFFTForRank(rank), re)\r\n self._checkGradComplex(\r\n self._tfIFFTForRank(rank), re, im, result_is_complex=False)\r\n\r\n @test_util.run_deprecated_v1\r\n def testGrad_Random(self):\r\n with spectral_ops_test_util.fft_kernel_label_map():\r\n for rank in VALID_FFT_RANKS:\r\n # rfft3d/irfft3d do not have gradients yet.\r\n if rank == 3:\r\n continue\r\n for dims in xrange(rank, rank + 2):\r\n for size in (5, 6):\r\n re = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1\r\n im = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1\r\n 
self._checkGradReal(self._tfFFTForRank(rank), re)\r\n self._checkGradComplex(\r\n self._tfIFFTForRank(rank), re, im, result_is_complex=False)\r\n\r\n\r\nclass FFTShiftTest(test.TestCase):\r\n\r\n @test_util.run_deprecated_v1\r\n def testDefinition(self):\r\n with self.session():\r\n x = [0, 1, 2, 3, 4, -4, -3, -2, -1]\r\n y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]\r\n self.assertAllEqual(fft_ops.fftshift(x).eval(), y)\r\n self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)\r\n x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]\r\n y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]\r\n self.assertAllEqual(fft_ops.fftshift(x).eval(), y)\r\n self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)\r\n\r\n @test_util.run_deprecated_v1\r\n def testAxesKeyword(self):\r\n with self.session():\r\n freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]\r\n shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]\r\n self.assertAllEqual(fft_ops.fftshift(freqs, axes=(0, 1)).eval(), shifted)\r\n self.assertAllEqual(\r\n fft_ops.fftshift(freqs, axes=0).eval(),\r\n fft_ops.fftshift(freqs, axes=(0,)).eval())\r\n self.assertAllEqual(fft_ops.ifftshift(shifted, axes=(0, 1)).eval(), freqs)\r\n self.assertAllEqual(\r\n fft_ops.ifftshift(shifted, axes=0).eval(),\r\n fft_ops.ifftshift(shifted, axes=(0,)).eval())\r\n self.assertAllEqual(fft_ops.fftshift(freqs).eval(), shifted)\r\n self.assertAllEqual(fft_ops.ifftshift(shifted).eval(), freqs)\r\n\r\n @test_util.run_deprecated_v1\r\n def testNumpyCompatibility(self):\r\n with self.session():\r\n x = [0, 1, 2, 3, 4, -4, -3, -2, -1]\r\n y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]\r\n self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))\r\n self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))\r\n x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]\r\n y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]\r\n self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))\r\n self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))\r\n freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]\r\n shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]\r\n self.assertAllEqual(\r\n fft_ops.fftshift(freqs, axes=(0, 1)).eval(),\r\n np.fft.fftshift(freqs, axes=(0, 1)))\r\n self.assertAllEqual(\r\n fft_ops.ifftshift(shifted, axes=(0, 1)).eval(),\r\n np.fft.ifftshift(shifted, axes=(0, 1)))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for the `LatencyAllEdges` optimization.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base\r\nfrom tensorflow.python.data.experimental.ops import optimization\r\nfrom tensorflow.python.data.experimental.ops import stats_aggregator\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\nclass LatencyAllEdgesTest(stats_dataset_test_base.StatsDatasetTestBase):\r\n\r\n def testLatencyStatsOptimization(self):\r\n aggregator = stats_aggregator.StatsAggregator()\r\n dataset = dataset_ops.Dataset.from_tensors(1).apply(\r\n optimization.assert_next(\r\n [\"LatencyStats\", \"Map\", \"LatencyStats\", \"Prefetch\",\r\n \"LatencyStats\"])).map(lambda x: x * x).prefetch(1)\r\n options = dataset_ops.Options()\r\n options.experimental_optimization.apply_default_optimizations = False\r\n options.experimental_stats.latency_all_edges = True\r\n options.experimental_stats.aggregator = aggregator\r\n dataset = dataset.with_options(options)\r\n self.assertDatasetProduces(\r\n dataset,\r\n expected_output=[1],\r\n requires_initialization=True,\r\n num_test_iterations=1)\r\n handle = self.getHandle(aggregator)\r\n self.assertStatisticsHasCount(\r\n handle, self.regexForNodeName(\"record_latency::TensorDataset\"), 1)\r\n self.assertStatisticsHasCount(\r\n handle, self.regexForNodeName(\"record_latency::MapDataset\"), 1)\r\n self.assertStatisticsHasCount(\r\n handle, self.regexForNodeName(\"record_latency::PrefetchDataset\"), 1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Core module for TensorFlow distribution objects and helpers.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.util import deprecation\r\n\r\n\r\n# pylint: disable=wildcard-import,unused-import,g-import-not-at-top\r\nwith deprecation.silence():\r\n from tensorflow.python.ops.distributions.bernoulli import Bernoulli\r\n from tensorflow.python.ops.distributions.beta import Beta\r\n from tensorflow.python.ops.distributions.categorical import Categorical\r\n from tensorflow.python.ops.distributions.dirichlet import Dirichlet\r\n from tensorflow.python.ops.distributions.dirichlet_multinomial import DirichletMultinomial\r\n from tensorflow.python.ops.distributions.distribution import *\r\n from tensorflow.python.ops.distributions.exponential import Exponential\r\n from tensorflow.python.ops.distributions.gamma import Gamma\r\n from tensorflow.python.ops.distributions.kullback_leibler import *\r\n from tensorflow.python.ops.distributions.laplace import Laplace\r\n from tensorflow.python.ops.distributions.multinomial import Multinomial\r\n from tensorflow.python.ops.distributions.normal import Normal\r\n from tensorflow.python.ops.distributions.student_t import StudentT\r\n from tensorflow.python.ops.distributions.uniform import Uniform\r\n# pylint: enable=wildcard-import,unused-import\r\ndel deprecation\r\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for MultivariateNormalFullCovariance.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom scipy import stats\r\nfrom tensorflow.contrib import distributions\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn_ops\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\nds = distributions\r\nrng = np.random.RandomState(42)\r\n\r\n\r\nclass MultivariateNormalFullCovarianceTest(test.TestCase):\r\n\r\n def _random_pd_matrix(self, *shape):\r\n mat = rng.rand(*shape)\r\n chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)\r\n chol = array_ops.matrix_band_part(chol, -1, 0)\r\n return math_ops.matmul(chol, chol, adjoint_b=True).eval()\r\n\r\n def testRaisesIfInitializedWithNonSymmetricMatrix(self):\r\n with self.cached_session():\r\n mu = [1., 2.]\r\n sigma = [[1., 0.], [1., 1.]] # Nonsingular, but not symmetric\r\n mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)\r\n with self.assertRaisesOpError(\"not symmetric\"):\r\n mvn.covariance().eval()\r\n\r\n def testNamePropertyIsSetByInitArg(self):\r\n with self.cached_session():\r\n mu = [1., 2.]\r\n sigma = [[1., 0.], [0., 1.]]\r\n mvn = ds.MultivariateNormalFullCovariance(mu, sigma, name=\"Billy\")\r\n self.assertEqual(mvn.name, \"Billy/\")\r\n\r\n def testDoesNotRaiseIfInitializedWithSymmetricMatrix(self):\r\n with self.cached_session():\r\n mu = rng.rand(10)\r\n sigma = self._random_pd_matrix(10, 10)\r\n mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)\r\n # Should not raise\r\n mvn.covariance().eval()\r\n\r\n def testLogPDFScalarBatch(self):\r\n with self.cached_session():\r\n mu = rng.rand(2)\r\n sigma = self._random_pd_matrix(2, 2)\r\n mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)\r\n x = rng.rand(2)\r\n\r\n log_pdf = mvn.log_prob(x)\r\n pdf = mvn.prob(x)\r\n\r\n scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)\r\n\r\n expected_log_pdf = scipy_mvn.logpdf(x)\r\n expected_pdf = scipy_mvn.pdf(x)\r\n self.assertEqual((), log_pdf.get_shape())\r\n self.assertEqual((), pdf.get_shape())\r\n self.assertAllClose(expected_log_pdf, log_pdf.eval())\r\n self.assertAllClose(expected_pdf, pdf.eval())\r\n\r\n def testLogPDFScalarBatchCovarianceNotProvided(self):\r\n with self.cached_session():\r\n mu = rng.rand(2)\r\n mvn = ds.MultivariateNormalFullCovariance(\r\n mu, covariance_matrix=None, validate_args=True)\r\n x = rng.rand(2)\r\n\r\n log_pdf = mvn.log_prob(x)\r\n pdf = mvn.prob(x)\r\n\r\n # Initialize a scipy_mvn with the default covariance.\r\n scipy_mvn = stats.multivariate_normal(mean=mu, cov=np.eye(2))\r\n\r\n expected_log_pdf = scipy_mvn.logpdf(x)\r\n expected_pdf = 
scipy_mvn.pdf(x)\r\n self.assertEqual((), log_pdf.get_shape())\r\n self.assertEqual((), pdf.get_shape())\r\n self.assertAllClose(expected_log_pdf, log_pdf.eval())\r\n self.assertAllClose(expected_pdf, pdf.eval())\r\n\r\n def testShapes(self):\r\n with self.cached_session():\r\n mu = rng.rand(3, 5, 2)\r\n covariance = self._random_pd_matrix(3, 5, 2, 2)\r\n\r\n mvn = ds.MultivariateNormalFullCovariance(\r\n mu, covariance, validate_args=True)\r\n\r\n # Shapes known at graph construction time.\r\n self.assertEqual((2,), tuple(mvn.event_shape.as_list()))\r\n self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))\r\n\r\n # Shapes known at runtime.\r\n self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))\r\n self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))\r\n\r\n def _random_mu_and_sigma(self, batch_shape, event_shape):\r\n # This ensures sigma is positive def.\r\n mat_shape = batch_shape + event_shape + event_shape\r\n mat = rng.randn(*mat_shape)\r\n perm = np.arange(mat.ndim)\r\n perm[-2:] = [perm[-1], perm[-2]]\r\n sigma = np.matmul(mat, np.transpose(mat, perm))\r\n\r\n mu_shape = batch_shape + event_shape\r\n mu = rng.randn(*mu_shape)\r\n\r\n return mu, sigma\r\n\r\n def testKLBatch(self):\r\n batch_shape = [2]\r\n event_shape = [3]\r\n with self.cached_session():\r\n mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)\r\n mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)\r\n mvn_a = ds.MultivariateNormalFullCovariance(\r\n loc=mu_a,\r\n covariance_matrix=sigma_a,\r\n validate_args=True)\r\n mvn_b = ds.MultivariateNormalFullCovariance(\r\n loc=mu_b,\r\n covariance_matrix=sigma_b,\r\n validate_args=True)\r\n\r\n kl = ds.kl_divergence(mvn_a, mvn_b)\r\n self.assertEqual(batch_shape, kl.get_shape())\r\n\r\n kl_v = kl.eval()\r\n expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],\r\n mu_b[0, :], sigma_b[0, :])\r\n expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],\r\n mu_b[1, :], sigma_b[1, :])\r\n self.assertAllClose(expected_kl_0, kl_v[0])\r\n self.assertAllClose(expected_kl_1, kl_v[1])\r\n\r\n def testKLBatchBroadcast(self):\r\n batch_shape = [2]\r\n event_shape = [3]\r\n with self.cached_session():\r\n mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)\r\n # No batch shape.\r\n mu_b, sigma_b = self._random_mu_and_sigma([], event_shape)\r\n mvn_a = ds.MultivariateNormalFullCovariance(\r\n loc=mu_a,\r\n covariance_matrix=sigma_a,\r\n validate_args=True)\r\n mvn_b = ds.MultivariateNormalFullCovariance(\r\n loc=mu_b,\r\n covariance_matrix=sigma_b,\r\n validate_args=True)\r\n\r\n kl = ds.kl_divergence(mvn_a, mvn_b)\r\n self.assertEqual(batch_shape, kl.get_shape())\r\n\r\n kl_v = kl.eval()\r\n expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],\r\n mu_b, sigma_b)\r\n expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],\r\n mu_b, sigma_b)\r\n self.assertAllClose(expected_kl_0, kl_v[0])\r\n self.assertAllClose(expected_kl_1, kl_v[1])\r\n\r\n\r\ndef _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):\r\n \"\"\"Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b).\"\"\"\r\n # Check using numpy operations\r\n # This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.\r\n # So it is important to also check that KL(mvn, mvn) = 0.\r\n sigma_b_inv = np.linalg.inv(sigma_b)\r\n\r\n t = np.trace(sigma_b_inv.dot(sigma_a))\r\n q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)\r\n k = mu_a.shape[0]\r\n l = np.log(np.linalg.det(sigma_b) / 
np.linalg.det(sigma_a))\r\n\r\n return 0.5 * (t + q - k + l)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for Keras initializers.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nfrom tensorflow.python import keras\r\nfrom tensorflow.python import tf2\r\nfrom tensorflow.python.framework import test_util\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.platform import test\r\n\r\n\r\n@test_util.run_all_in_graph_and_eager_modes\r\nclass KerasInitializersTest(test.TestCase):\r\n\r\n def _runner(self, init, shape, target_mean=None, target_std=None,\r\n target_max=None, target_min=None):\r\n variable = keras.backend.variable(init(shape))\r\n output = keras.backend.get_value(variable)\r\n # Test serialization (assumes deterministic behavior).\r\n config = init.get_config()\r\n reconstructed_init = init.__class__.from_config(config)\r\n variable = keras.backend.variable(reconstructed_init(shape))\r\n output_2 = keras.backend.get_value(variable)\r\n self.assertAllClose(output, output_2, atol=1e-4)\r\n\r\n def test_uniform(self):\r\n tensor_shape = (9, 6, 7)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.RandomUniformV2(minval=-1, maxval=1, seed=124),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_max=1,\r\n target_min=-1)\r\n\r\n def test_normal(self):\r\n tensor_shape = (8, 12, 99)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.RandomNormalV2(mean=0, stddev=1, seed=153),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=1)\r\n\r\n def test_truncated_normal(self):\r\n tensor_shape = (12, 99, 7)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.TruncatedNormalV2(mean=0, stddev=1, seed=126),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_max=2,\r\n target_min=-2)\r\n\r\n def test_constant(self):\r\n tensor_shape = (5, 6, 4)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.ConstantV2(2.),\r\n tensor_shape,\r\n target_mean=2,\r\n target_max=2,\r\n target_min=2)\r\n\r\n def test_lecun_uniform(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, _ = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(1. / fan_in)\r\n self._runner(\r\n keras.initializers.lecun_uniformV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_glorot_uniform(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, fan_out = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(2. 
/ (fan_in + fan_out))\r\n self._runner(\r\n keras.initializers.GlorotUniformV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_he_uniform(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, _ = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(2. / fan_in)\r\n self._runner(\r\n keras.initializers.he_uniformV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_lecun_normal(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, _ = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(1. / fan_in)\r\n self._runner(\r\n keras.initializers.lecun_normalV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_glorot_normal(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, fan_out = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(2. / (fan_in + fan_out))\r\n self._runner(\r\n keras.initializers.GlorotNormalV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_he_normal(self):\r\n tensor_shape = (5, 6, 4, 2)\r\n with self.cached_session():\r\n fan_in, _ = init_ops._compute_fans(tensor_shape)\r\n std = np.sqrt(2. / fan_in)\r\n self._runner(\r\n keras.initializers.he_normalV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_std=std)\r\n\r\n def test_orthogonal(self):\r\n tensor_shape = (20, 20)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.OrthogonalV2(seed=123),\r\n tensor_shape,\r\n target_mean=0.)\r\n\r\n def test_identity(self):\r\n with self.cached_session():\r\n tensor_shape = (3, 4, 5)\r\n with self.assertRaises(ValueError):\r\n self._runner(\r\n keras.initializers.IdentityV2(),\r\n tensor_shape,\r\n target_mean=1. / tensor_shape[0],\r\n target_max=1.)\r\n\r\n tensor_shape = (3, 3)\r\n self._runner(\r\n keras.initializers.IdentityV2(),\r\n tensor_shape,\r\n target_mean=1. 
/ tensor_shape[0],\r\n target_max=1.)\r\n\r\n def test_zero(self):\r\n tensor_shape = (4, 5)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.ZerosV2(),\r\n tensor_shape,\r\n target_mean=0.,\r\n target_max=0.)\r\n\r\n def test_one(self):\r\n tensor_shape = (4, 5)\r\n with self.cached_session():\r\n self._runner(\r\n keras.initializers.OnesV2(),\r\n tensor_shape,\r\n target_mean=1.,\r\n target_max=1.)\r\n\r\n def test_default_random_uniform(self):\r\n ru = keras.initializers.get('uniform')\r\n self.assertEqual(ru.minval, -0.05)\r\n self.assertEqual(ru.maxval, 0.05)\r\n\r\n def test_default_random_normal(self):\r\n rn = keras.initializers.get('normal')\r\n self.assertEqual(rn.mean, 0.0)\r\n self.assertEqual(rn.stddev, 0.05)\r\n\r\n def test_default_truncated_normal(self):\r\n tn = keras.initializers.get('truncated_normal')\r\n self.assertEqual(tn.mean, 0.0)\r\n self.assertEqual(tn.stddev, 0.05)\r\n\r\n def test_initializer_v2_get(self):\r\n tf2_force_enabled = tf2._force_enable # pylint: disable=protected-access\r\n try:\r\n tf2.enable()\r\n rn = keras.initializers.get('random_normal')\r\n self.assertIn('init_ops_v2', rn.__class__.__module__)\r\n finally:\r\n tf2._force_enable = tf2_force_enabled # pylint: disable=protected-access\r\n\r\n def test_custom_initializer_saving(self):\r\n\r\n def my_initializer(shape, dtype=None):\r\n return array_ops.ones(shape, dtype=dtype)\r\n\r\n inputs = keras.Input((10,))\r\n outputs = keras.layers.Dense(1, kernel_initializer=my_initializer)(inputs)\r\n model = keras.Model(inputs, outputs)\r\n model2 = model.from_config(\r\n model.get_config(), custom_objects={'my_initializer': my_initializer})\r\n self.assertEqual(model2.layers[1].kernel_initializer, my_initializer)\r\n\r\n @test_util.run_v2_only\r\n def test_load_external_variance_scaling_v2(self):\r\n external_serialized_json = {\r\n 'class_name': 'VarianceScaling',\r\n 'config': {\r\n 'distribution': 'normal',\r\n 'mode': 'fan_avg',\r\n 'scale': 1.0,\r\n 'seed': None\r\n }\r\n }\r\n initializer = keras.initializers.deserialize(external_serialized_json)\r\n self.assertEqual(initializer.distribution, 'truncated_normal')\r\n\r\n\r\nif __name__ == '__main__':\r\n test.main()\r\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"LSTM Block Cell ops.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom absl.testing import parameterized\r\nimport numpy as np\r\n\r\nfrom tensorflow.contrib.rnn.python.kernel_tests import benchmarking\r\nfrom tensorflow.contrib.rnn.python.ops import lstm_ops\r\nfrom tensorflow.python.client import session\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_array_ops\r\nfrom tensorflow.python.ops import gen_bitwise_ops\r\nfrom tensorflow.python.ops import gradients_impl\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.ops import rnn\r\nfrom tensorflow.python.ops import rnn_cell\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.ops import variables\r\nfrom tensorflow.python.platform import test\r\n\r\nblock_lstm = lstm_ops._block_lstm # pylint: disable=protected-access\r\n\r\n\r\nclass _MaskedRandomUniformInitializer(init_ops.RandomUniform):\r\n \"\"\"Initializer for uniform dist tensors with trailing bits zeroed-out.\r\n\r\n Allow returning tensors with last few mantissa bits set to 0. This potentially\r\n helps avoid getting into precision issues when testing low precision (float16)\r\n computation.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n minval=0,\r\n maxval=None,\r\n seed=None,\r\n dtype=dtypes.float16,\r\n num_valid_mantissa_bits=4):\r\n \"\"\"Constructor.\r\n\r\n Args:\r\n minval: A python scalar or a scalar tensor. Lower bound of the range of\r\n random values to generate.\r\n maxval: A python scalar or a scalar tensor. Upper bound of the range of\r\n random values to generate. Defaults to 1 for float types.\r\n seed: A Python integer. Used to create random seeds. See\r\n `tf.compat.v1.set_random_seed` for behavior.\r\n dtype: The data type. 
Only supports tf.float16 for now.\r\n num_valid_mantissa_bits: number of non-zero mantissa bits, default to 4.\r\n\r\n Raises:\r\n ValueError: An error if `dtype` is not tf.float16.\r\n \"\"\"\r\n if dtype not in (dtypes.float16,):\r\n raise ValueError(\"dtype: %s not supported\" % dtype.name)\r\n\r\n super(_MaskedRandomUniformInitializer, self).__init__(\r\n minval=minval, maxval=maxval, seed=seed, dtype=dtype)\r\n self._num_mantissa_bits = 10\r\n self._num_valid_mantissa_bits = num_valid_mantissa_bits\r\n\r\n def __call__(self, shape, dtype=dtypes.float16, partition_info=None):\r\n if dtype and dtype != dtypes.float16:\r\n raise ValueError(\"dtype: %s not supported\" % dtype.name)\r\n res = super(_MaskedRandomUniformInitializer, self).__call__(\r\n shape, dtype, partition_info)\r\n # get uint16 view of the underlying buffer.\r\n res = gen_array_ops.bitcast(res, dtypes.uint16)\r\n\r\n # mask the last `shift` mantissa bits.\r\n shift = self._num_mantissa_bits - self._num_valid_mantissa_bits\r\n mask = (0xffff >> shift) << shift\r\n res = gen_bitwise_ops.bitwise_and(res, mask)\r\n\r\n # restore float16 view.\r\n return gen_array_ops.bitcast(res, dtype)\r\n\r\n\r\ndef _get_initializer(init_bound, dtype, seed):\r\n if dtype == dtypes.float16:\r\n return _MaskedRandomUniformInitializer(\r\n -init_bound, init_bound, dtype=dtype, seed=seed)\r\n else:\r\n return init_ops.random_uniform_initializer(\r\n -init_bound, init_bound, dtype=dtype, seed=seed)\r\n\r\n\r\ndef blocks_match(sess, use_peephole, dtype=dtypes.float32, cell_clip=None):\r\n batch_size = 2\r\n input_size = 3\r\n cell_size = 4\r\n sequence_length = 4\r\n\r\n inputs = []\r\n for _ in range(sequence_length):\r\n inp = ops.convert_to_tensor(\r\n np.random.randn(batch_size, input_size), dtype=dtype)\r\n inputs.append(inp)\r\n stacked_inputs = array_ops.stack(inputs)\r\n\r\n init_bound = 1e-1 if dtype == dtypes.float16 else 1e-2\r\n initializer = _get_initializer(init_bound, dtype=dtype, seed=19890212)\r\n\r\n with variable_scope.variable_scope(\"test\", initializer=initializer):\r\n # magic naming so that the cells pick up these variables and reuse them\r\n if use_peephole:\r\n wci = variable_scope.get_variable(\r\n \"rnn/lstm_cell/w_i_diag\", shape=[cell_size], dtype=dtype)\r\n wcf = variable_scope.get_variable(\r\n \"rnn/lstm_cell/w_f_diag\", shape=[cell_size], dtype=dtype)\r\n wco = variable_scope.get_variable(\r\n \"rnn/lstm_cell/w_o_diag\", shape=[cell_size], dtype=dtype)\r\n\r\n w = variable_scope.get_variable(\r\n \"rnn/lstm_cell/kernel\",\r\n shape=[input_size + cell_size, cell_size * 4],\r\n dtype=dtype)\r\n b = variable_scope.get_variable(\r\n \"rnn/lstm_cell/bias\",\r\n shape=[cell_size * 4],\r\n dtype=dtype,\r\n initializer=init_ops.zeros_initializer())\r\n\r\n basic_cell = rnn_cell.LSTMCell(\r\n cell_size,\r\n use_peepholes=use_peephole,\r\n cell_clip=cell_clip,\r\n dtype=dtype,\r\n state_is_tuple=True,\r\n reuse=True)\r\n basic_outputs_op, basic_state_op = rnn.static_rnn(\r\n basic_cell, inputs, dtype=dtype)\r\n\r\n if use_peephole:\r\n _, _, _, _, _, _, block_outputs_op = block_lstm(\r\n ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),\r\n inputs,\r\n w,\r\n b,\r\n wci=wci,\r\n wcf=wcf,\r\n wco=wco,\r\n cell_clip=cell_clip,\r\n use_peephole=True)\r\n else:\r\n _, _, _, _, _, _, block_outputs_op = block_lstm(\r\n ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),\r\n inputs,\r\n w,\r\n b,\r\n cell_clip=cell_clip)\r\n\r\n fused_cell = lstm_ops.LSTMBlockFusedCell(\r\n cell_size,\r\n 
cell_clip=cell_clip,\r\n use_peephole=use_peephole,\r\n reuse=True,\r\n name=\"rnn/lstm_cell\")\r\n fused_outputs_op, fused_state_op = fused_cell(stacked_inputs, dtype=dtype)\r\n\r\n sess.run([variables.global_variables_initializer()])\r\n basic_outputs, basic_state = sess.run([basic_outputs_op, basic_state_op[0]])\r\n basic_grads = sess.run(gradients_impl.gradients(basic_outputs_op, inputs))\r\n xs = [w, b]\r\n if use_peephole:\r\n xs += [wci, wcf, wco]\r\n basic_wgrads = sess.run(gradients_impl.gradients(basic_outputs_op, xs))\r\n\r\n block_outputs = sess.run(block_outputs_op)\r\n block_grads = sess.run(gradients_impl.gradients(block_outputs_op, inputs))\r\n block_wgrads = sess.run(gradients_impl.gradients(block_outputs_op, xs))\r\n\r\n xs = [w, b]\r\n if use_peephole:\r\n xs += [wci, wcf, wco]\r\n fused_outputs, fused_state = sess.run([fused_outputs_op, fused_state_op[0]])\r\n fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))\r\n fused_wgrads = sess.run(gradients_impl.gradients(fused_outputs_op, xs))\r\n\r\n return (basic_state, fused_state, basic_outputs, block_outputs,\r\n fused_outputs, basic_grads, block_grads, fused_grads, basic_wgrads,\r\n block_wgrads, fused_wgrads)\r\n\r\n\r\nclass LSTMBlockCellTest(test.TestCase, parameterized.TestCase):\r\n\r\n TEST_CASES = ({\r\n \"testcase_name\": \"Fp32\",\r\n \"dtype\": dtypes.float32,\r\n \"rtol\": 1e-6,\r\n \"atol\": 1e-6\r\n }, {\r\n \"testcase_name\": \"Fp16\",\r\n \"dtype\": dtypes.float16,\r\n \"rtol\": 8e-3,\r\n \"atol\": 8e-4\r\n })\r\n\r\n def testNoneDimsWithDynamicRNN(self):\r\n with self.session(use_gpu=True, graph=ops.Graph()) as sess:\r\n batch_size = 4\r\n num_steps = 5\r\n input_dim = 6\r\n cell_size = 7\r\n\r\n cell = lstm_ops.LSTMBlockCell(cell_size)\r\n x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_dim))\r\n\r\n output, _ = rnn.dynamic_rnn(\r\n cell, x, time_major=True, dtype=dtypes.float32)\r\n sess.run(variables.global_variables_initializer())\r\n feed = {}\r\n feed[x] = np.random.randn(num_steps, batch_size, input_dim)\r\n sess.run(output, feed)\r\n\r\n def testLSTMBlockCell(self):\r\n with self.session(use_gpu=True, graph=ops.Graph()) as sess:\r\n with variable_scope.variable_scope(\r\n \"root\", initializer=init_ops.constant_initializer(0.5)):\r\n x = array_ops.zeros([1, 2])\r\n m0 = array_ops.zeros([1, 2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [lstm_ops.LSTMBlockCell(2)\r\n for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: np.array([[1., 1.]]),\r\n m0.name: 0.1 * np.ones([1, 2]),\r\n m1.name: 0.1 * np.ones([1, 2]),\r\n m2.name: 0.1 * np.ones([1, 2]),\r\n m3.name: 0.1 * np.ones([1, 2])\r\n })\r\n self.assertEqual(len(res), 5)\r\n self.assertAllClose(res[0], [[0.24024698, 0.24024698]])\r\n # These numbers are from testBasicLSTMCell and only test c/h.\r\n self.assertAllClose(res[1], [[0.68967271, 0.68967271]])\r\n self.assertAllClose(res[2], [[0.44848421, 0.44848421]])\r\n self.assertAllClose(res[3], [[0.39897051, 0.39897051]])\r\n self.assertAllClose(res[4], [[0.24024698, 0.24024698]])\r\n\r\n def testCompatibleNames(self):\r\n with self.session(use_gpu=True, graph=ops.Graph()):\r\n cell = rnn_cell.LSTMCell(10)\r\n pcell = rnn_cell.LSTMCell(10, use_peepholes=True)\r\n inputs = [array_ops.zeros([4, 
5])] * 6\r\n rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=\"basic\")\r\n rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope=\"peephole\")\r\n basic_names = {\r\n v.name: v.get_shape()\r\n for v in variables.trainable_variables()\r\n }\r\n\r\n with self.session(use_gpu=True, graph=ops.Graph()):\r\n cell = lstm_ops.LSTMBlockCell(10)\r\n pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True)\r\n inputs = [array_ops.zeros([4, 5])] * 6\r\n rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=\"basic\")\r\n rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope=\"peephole\")\r\n block_names = {\r\n v.name: v.get_shape()\r\n for v in variables.trainable_variables()\r\n }\r\n\r\n with self.session(use_gpu=True, graph=ops.Graph()):\r\n cell = lstm_ops.LSTMBlockFusedCell(10)\r\n pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True)\r\n inputs = array_ops.stack([array_ops.zeros([4, 5])] * 6)\r\n cell(inputs, dtype=dtypes.float32, scope=\"basic/lstm_cell\")\r\n pcell(inputs, dtype=dtypes.float32, scope=\"peephole/lstm_cell\")\r\n fused_names = {\r\n v.name: v.get_shape()\r\n for v in variables.trainable_variables()\r\n }\r\n\r\n self.assertEqual(basic_names, block_names)\r\n self.assertEqual(basic_names, fused_names)\r\n\r\n def testLSTMBasicToBlockCell(self):\r\n with self.session(use_gpu=True) as sess:\r\n x = array_ops.zeros([1, 2])\r\n x_values = np.random.randn(1, 2)\r\n\r\n m0_val = 0.1 * np.ones([1, 2])\r\n m1_val = -0.1 * np.ones([1, 2])\r\n m2_val = -0.2 * np.ones([1, 2])\r\n m3_val = 0.2 * np.ones([1, 2])\r\n\r\n initializer = init_ops.random_uniform_initializer(\r\n -0.01, 0.01, seed=19890212)\r\n with variable_scope.variable_scope(\"basic\", initializer=initializer):\r\n m0 = array_ops.zeros([1, 2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)],\r\n state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: x_values,\r\n m0.name: m0_val,\r\n m1.name: m1_val,\r\n m2.name: m2_val,\r\n m3.name: m3_val\r\n })\r\n\r\n with variable_scope.variable_scope(\"block\", initializer=initializer):\r\n m0 = array_ops.zeros([1, 2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [lstm_ops.LSTMBlockCell(2)\r\n for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: x_values,\r\n m0.name: m0_val,\r\n m1.name: m1_val,\r\n m2.name: m2_val,\r\n m3.name: m3_val\r\n })\r\n\r\n self.assertEqual(len(basic_res), len(block_res))\r\n for basic, block in zip(basic_res, block_res):\r\n self.assertAllClose(basic, block)\r\n\r\n def testLSTMBasicToBlockCellPeeping(self):\r\n with self.session(use_gpu=True) as sess:\r\n x = array_ops.zeros([1, 2])\r\n x_values = np.random.randn(1, 2)\r\n\r\n m0_val = 0.1 * np.ones([1, 2])\r\n m1_val = -0.1 * np.ones([1, 2])\r\n m2_val = -0.2 * np.ones([1, 2])\r\n m3_val = 0.2 * np.ones([1, 2])\r\n\r\n initializer = init_ops.random_uniform_initializer(\r\n -0.01, 0.01, seed=19890212)\r\n with variable_scope.variable_scope(\"basic\", initializer=initializer):\r\n m0 = array_ops.zeros([1, 
2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [\r\n rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True)\r\n for _ in range(2)\r\n ],\r\n state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: x_values,\r\n m0.name: m0_val,\r\n m1.name: m1_val,\r\n m2.name: m2_val,\r\n m3.name: m3_val\r\n })\r\n\r\n with variable_scope.variable_scope(\"block\", initializer=initializer):\r\n m0 = array_ops.zeros([1, 2])\r\n m1 = array_ops.zeros([1, 2])\r\n m2 = array_ops.zeros([1, 2])\r\n m3 = array_ops.zeros([1, 2])\r\n g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(\r\n [lstm_ops.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],\r\n state_is_tuple=True)(x, ((m0, m1), (m2, m3)))\r\n sess.run([variables.global_variables_initializer()])\r\n block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {\r\n x.name: x_values,\r\n m0.name: m0_val,\r\n m1.name: m1_val,\r\n m2.name: m2_val,\r\n m3.name: m3_val\r\n })\r\n\r\n self.assertEqual(len(basic_res), len(block_res))\r\n for basic, block in zip(basic_res, block_res):\r\n self.assertAllClose(basic, block)\r\n\r\n def LSTMBasicToBlockTestHelper(self,\r\n dtype=dtypes.float32,\r\n use_peephole=False,\r\n cell_clip=None,\r\n rtol=1e-6,\r\n atol=1e-6):\r\n with self.session(use_gpu=True, graph=ops.Graph()) as sess:\r\n (basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,\r\n basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,\r\n fused_wgrads) = blocks_match(\r\n sess, use_peephole=use_peephole, dtype=dtype, cell_clip=cell_clip)\r\n\r\n self.assertAllClose(basic_outputs, block_outputs, rtol=rtol, atol=atol)\r\n self.assertAllClose(basic_grads, block_grads, rtol=rtol, atol=atol)\r\n for basic, block in zip(basic_wgrads, block_wgrads):\r\n self.assertAllClose(basic, block, rtol=rtol, atol=atol)\r\n\r\n self.assertAllClose(basic_outputs, fused_outputs, rtol=rtol, atol=atol)\r\n self.assertAllClose(basic_state, fused_state, rtol=rtol, atol=atol)\r\n self.assertAllClose(basic_grads, fused_grads, rtol=rtol, atol=atol)\r\n for basic, fused in zip(basic_wgrads, fused_wgrads):\r\n self.assertAllClose(basic, fused, rtol=rtol, atol=atol)\r\n\r\n @parameterized.named_parameters(*TEST_CASES)\r\n def testLSTMBasicToBlock(self, dtype, rtol, atol):\r\n self.LSTMBasicToBlockTestHelper(\r\n dtype, use_peephole=False, rtol=rtol, atol=atol)\r\n\r\n @parameterized.named_parameters(*TEST_CASES)\r\n def testLSTMBasicToBlockPeeping(self, dtype, rtol, atol):\r\n self.LSTMBasicToBlockTestHelper(\r\n dtype, use_peephole=True, rtol=rtol, atol=atol)\r\n\r\n @parameterized.named_parameters(*TEST_CASES)\r\n def testLSTMBasicToBlockCellClip(self, dtype, rtol, atol):\r\n self.LSTMBasicToBlockTestHelper(\r\n dtype, use_peephole=True, cell_clip=0.5, rtol=rtol, atol=atol)\r\n\r\n def testLSTMFusedSequenceLengths(self):\r\n \"\"\"Verify proper support for sequence lengths in LSTMBlockFusedCell.\"\"\"\r\n with self.session(use_gpu=True) as sess:\r\n batch_size = 3\r\n input_size = 4\r\n cell_size = 5\r\n max_sequence_length = 6\r\n\r\n inputs = []\r\n for _ in range(max_sequence_length):\r\n inp = ops.convert_to_tensor(\r\n np.random.randn(batch_size, input_size), dtype=dtypes.float32)\r\n inputs.append(inp)\r\n seq_lengths = constant_op.constant([3, 4, 5])\r\n cell_inputs = 
array_ops.stack(inputs)\r\n\r\n initializer = init_ops.random_uniform_initializer(\r\n -0.01, 0.01, seed=19890213)\r\n\r\n with variable_scope.variable_scope(\"lstm_cell\", initializer=initializer):\r\n # magic naming so that the cells pick up these variables and reuse them\r\n variable_scope.get_variable(\r\n \"kernel\",\r\n shape=[input_size + cell_size, cell_size * 4],\r\n dtype=dtypes.float32)\r\n\r\n variable_scope.get_variable(\r\n \"bias\",\r\n shape=[cell_size * 4],\r\n dtype=dtypes.float32,\r\n initializer=init_ops.zeros_initializer())\r\n\r\n cell = lstm_ops.LSTMBlockFusedCell(\r\n cell_size, cell_clip=0, use_peephole=False, reuse=True,\r\n name=\"lstm_cell\")\r\n\r\n fused_outputs_op, fused_state_op = cell(\r\n cell_inputs, dtype=dtypes.float32, sequence_length=seq_lengths)\r\n\r\n cell_vars = [\r\n v for v in variables.trainable_variables()\r\n if v.name.endswith(\"kernel\") or v.name.endswith(\"bias\")\r\n ]\r\n\r\n # Verify that state propagation works if we turn our sequence into\r\n # tiny (single-time) subsequences, i.e. unfuse the cell\r\n unfused_outputs_op = []\r\n state = None\r\n with variable_scope.variable_scope(\r\n variable_scope.get_variable_scope(), reuse=True):\r\n for i, inp in enumerate(inputs):\r\n lengths = [int(i < l) for l in seq_lengths.eval()]\r\n output, state = cell(\r\n array_ops.expand_dims(inp, 0),\r\n initial_state=state,\r\n dtype=dtypes.float32,\r\n sequence_length=lengths)\r\n unfused_outputs_op.append(output[0])\r\n unfused_outputs_op = array_ops.stack(unfused_outputs_op)\r\n\r\n sess.run([variables.global_variables_initializer()])\r\n unfused_outputs, unfused_state = sess.run([unfused_outputs_op, state[0]])\r\n unfused_grads = sess.run(\r\n gradients_impl.gradients(unfused_outputs_op, inputs))\r\n unfused_wgrads = sess.run(\r\n gradients_impl.gradients(unfused_outputs_op, cell_vars))\r\n\r\n fused_outputs, fused_state = sess.run(\r\n [fused_outputs_op, fused_state_op[0]])\r\n fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))\r\n fused_wgrads = sess.run(\r\n gradients_impl.gradients(fused_outputs_op, cell_vars))\r\n\r\n self.assertAllClose(fused_outputs, unfused_outputs)\r\n self.assertAllClose(fused_state, unfused_state)\r\n self.assertAllClose(fused_grads, unfused_grads)\r\n for fused, unfused in zip(fused_wgrads, unfused_wgrads):\r\n self.assertAllClose(fused, unfused, rtol=1e-6, atol=1e-6)\r\n\r\n#### Benchmarking.\r\n\r\n\r\nclass BenchmarkLSTMBlock(test.Benchmark):\r\n\r\n def benchmarkLSTMBlockCellFpropWithDynamicRNN(self):\r\n print(\"BlockLSTMCell forward propagation via dynamic_rnn().\")\r\n print(\"--------------------------------------------------------------\")\r\n print(\"LSTMBlockCell Seconds per inference.\")\r\n print(\"batch_size,cell_size,input_size,time_steps,use_gpu,wall_time\")\r\n iters = 10\r\n for config in benchmarking.dict_product({\r\n \"batch_size\": [1, 8, 13, 32, 67, 128],\r\n \"cell_size\": [128, 250, 512, 650, 1024, 1350],\r\n \"time_steps\": [40],\r\n \"use_gpu\": [True, False],\r\n \"dtype\": [\"float32\", \"float16\"],\r\n }):\r\n dtype = dtypes.float32 if config[\"dtype\"] == \"float32\" else dtypes.float16\r\n with ops.Graph().as_default():\r\n with benchmarking.device(use_gpu=config[\"use_gpu\"]):\r\n inputs = variable_scope.get_variable(\r\n \"x\",\r\n dtype=dtype,\r\n shape=[\r\n config[\"time_steps\"], config[\"batch_size\"],\r\n config[\"cell_size\"]\r\n ])\r\n cell = lstm_ops.LSTMBlockCell(config[\"cell_size\"], dtype=dtype)\r\n outputs = rnn.dynamic_rnn(cell, inputs, 
time_major=True, dtype=dtype)\r\n init_op = variables.global_variables_initializer()\r\n\r\n with session.Session() as sess:\r\n sess.run(init_op)\r\n wall_time = benchmarking.seconds_per_run(outputs, sess, iters)\r\n\r\n # Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable\r\n # is set, this will produce a copy-paste-able CSV file.\r\n print(\",\".join(\r\n map(str, [\r\n config[\"dtype\"], config[\"batch_size\"], config[\"cell_size\"],\r\n config[\"cell_size\"], config[\"time_steps\"], config[\"use_gpu\"],\r\n wall_time\r\n ])))\r\n benchmark_name_template = \"_\".join([\r\n \"LSTMBlockCell_fprop\", \"DT_%(dtype)s\", \"BS%(batch_size)i\",\r\n \"CS%(cell_size)i\", \"IS%(cell_size)i\", \"TS%(time_steps)i\",\r\n \"gpu_%(use_gpu)s\"\r\n ])\r\n\r\n self.report_benchmark(\r\n name=benchmark_name_template % config,\r\n iters=iters,\r\n wall_time=wall_time,\r\n extras=config)\r\n\r\n def benchmarkLSTMBlockCellBpropWithDynamicRNN(self):\r\n print(\"BlockLSTMCell backward propagation via dynamic_rnn().\")\r\n print(\"--------------------------------------------------------------\")\r\n print(\"LSTMBlockCell Seconds per inference.\")\r\n print(\"batch_size,cell_size,input_size,time_steps,use_gpu,wall_time\")\r\n iters = 10\r\n for config in benchmarking.dict_product({\r\n \"batch_size\": [1, 8, 13, 32, 67, 128],\r\n \"cell_size\": [128, 250, 512, 650, 1024, 1350],\r\n \"time_steps\": [40],\r\n \"use_gpu\": [True, False],\r\n \"dtype\": [\"float32\", \"float16\"],\r\n }):\r\n dtype = dtypes.float32 if config[\"dtype\"] == \"float32\" else dtypes.float16\r\n with ops.Graph().as_default():\r\n with benchmarking.device(use_gpu=config[\"use_gpu\"]):\r\n time_steps = config[\"time_steps\"]\r\n batch_size = config[\"batch_size\"]\r\n cell_size = input_size = config[\"cell_size\"]\r\n inputs = variable_scope.get_variable(\r\n \"x\", [time_steps, batch_size, cell_size],\r\n trainable=False,\r\n dtype=dtype)\r\n with variable_scope.variable_scope(\r\n \"rnn\", reuse=variable_scope.AUTO_REUSE):\r\n w = variable_scope.get_variable(\r\n \"rnn/lstm_cell/kernel\",\r\n shape=[input_size + cell_size, cell_size * 4],\r\n dtype=dtype)\r\n b = variable_scope.get_variable(\r\n \"rnn/lstm_cell/bias\",\r\n shape=[cell_size * 4],\r\n dtype=dtype,\r\n initializer=init_ops.zeros_initializer())\r\n cell = lstm_ops.LSTMBlockCell(cell_size, dtype=dtype)\r\n outputs = rnn.dynamic_rnn(\r\n cell, inputs, time_major=True, dtype=dtype)\r\n grads = gradients_impl.gradients(outputs, [inputs, w, b])\r\n init_op = variables.global_variables_initializer()\r\n\r\n with session.Session() as sess:\r\n sess.run(init_op)\r\n wall_time = benchmarking.seconds_per_run(grads, sess, iters)\r\n\r\n # Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable\r\n # is set, this will produce a copy-paste-able CSV file.\r\n print(\",\".join(\r\n map(str, [\r\n config[\"dtype\"], batch_size, cell_size, cell_size, time_steps,\r\n config[\"use_gpu\"], wall_time\r\n ])))\r\n benchmark_name_template = \"_\".join([\r\n \"LSTMBlockCell_bprop\", \"DT_%(dtype)s\", \"BS%(batch_size)i\",\r\n \"CS%(cell_size)i\", \"IS%(cell_size)i\", \"TS%(time_steps)i\",\r\n \"gpu_%(use_gpu)s\"\r\n ])\r\n\r\n self.report_benchmark(\r\n name=benchmark_name_template % config,\r\n iters=iters,\r\n wall_time=wall_time,\r\n extras=config)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test.main()\r\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Training related logic for Keras model in TF 2.0 context.\r\n\r\nNote that all the code under this module is under active development, please DO\r\nNOT use it unless you are really sure what you are doing.\r\n\"\"\"\r\n\r\n# pylint: disable=protected-access\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport functools\r\n\r\nfrom tensorflow.python.distribute import distribution_strategy_context\r\nfrom tensorflow.python.eager import def_function\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import tensor_util\r\nfrom tensorflow.python.framework.ops import composite_tensor\r\nfrom tensorflow.python.keras import backend\r\nfrom tensorflow.python.keras.distribute import distributed_training_utils as dist_utils\r\nfrom tensorflow.python.keras.engine import training_eager\r\nfrom tensorflow.python.keras.engine import training_utils\r\nfrom tensorflow.python.keras.utils.mode_keys import ModeKeys\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.util import nest\r\n\r\n\r\ndef _get_or_make_execution_function(model, mode):\r\n \"\"\"Makes or reuses function to run one step of distributed model execution.\"\"\"\r\n model._init_distributed_function_cache_if_not_compiled()\r\n\r\n # Use a key with 'v2' to distinguish from fall-back execution functions.\r\n key = (mode, 'v2')\r\n distributed_function = dist_utils.get_distributed_function(model, key)\r\n if distributed_function:\r\n return distributed_function\r\n\r\n distribution_function = _make_execution_function(model, mode)\r\n dist_utils.set_distributed_function(model, key, distribution_function)\r\n return distribution_function\r\n\r\n\r\ndef _make_execution_function(model, mode):\r\n \"\"\"Creates a function to run one step of distributed model execution.\"\"\"\r\n per_replica_function = _make_replica_execution_function(mode)\r\n\r\n def distributed_function(input_iterator):\r\n \"\"\"A single step of the distributed execution across replicas.\"\"\"\r\n x, y, sample_weights = _prepare_feed_values(\r\n model, input_iterator, mode)\r\n # Call `Model.{train,test,predict}_on_batch` on every replica passing\r\n # PerReplicas as arguments. On every replica inside this call, each\r\n # PerReplica object will return the value for that replica. 
The outputs\r\n # are PerReplicas too.\r\n strategy = distribution_strategy_context.get_strategy()\r\n outputs = strategy.experimental_run_v2(\r\n per_replica_function, args=(model, x, y, sample_weights))\r\n # Out of PerReplica outputs reduce or pick values to return.\r\n all_outputs = dist_utils.unwrap_output_dict(\r\n strategy, outputs, mode)\r\n return all_outputs\r\n\r\n if not model.run_eagerly:\r\n distributed_function = def_function.function(\r\n distributed_function, autograph=False)\r\n\r\n def execution_function(input_fn):\r\n # `numpy` translates Tensors to values in Eager mode.\r\n return nest.map_structure(_non_none_constant_value,\r\n distributed_function(input_fn))\r\n\r\n return execution_function\r\n\r\n\r\ndef _non_none_constant_value(v):\r\n constant_value = tensor_util.constant_value(v)\r\n return constant_value if constant_value is not None else v\r\n\r\n\r\ndef _prepare_feed_values(model, inputs, mode):\r\n \"\"\"Prepare feed values to the model execution function.\r\n\r\n Arguments:\r\n model: Model to prepare feed values for.\r\n inputs: An iterator of model inputs, targets, and sample_weights.\r\n model inputs may be lists, single values, or dicts mapping input feed\r\n names to values.\r\n mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.\r\n\r\n Returns:\r\n Feed values for the model in the given mode. This is a tuple of\r\n the structure (inputs, targets, sample_weights), where each of\r\n (tuple, targets, sample_weights) may be a python list. Single values\r\n for inputs will always be wrapped in lists.\r\n \"\"\"\r\n inputs, targets, sample_weights = _get_input_from_iterator(inputs)\r\n\r\n # When the inputs are dict, then we want to flatten it in the same order as\r\n # the input layers, such that the data are fed into the input layers in the\r\n # correct order.\r\n if isinstance(inputs, dict):\r\n inputs = [inputs[key] for key in model._feed_input_names]\r\n else:\r\n inputs = training_utils.ModelInputs(inputs).as_list()\r\n\r\n if mode == ModeKeys.PREDICT:\r\n sample_weights = []\r\n targets = []\r\n\r\n ins = [inputs, targets, sample_weights]\r\n return tuple(ins)\r\n\r\n\r\ndef _get_input_from_iterator(iterator):\r\n \"\"\"Get elements from the iterator and verify the input shape and type.\"\"\"\r\n next_element = next(iterator)\r\n\r\n if (tensor_util.is_tensor(next_element) or\r\n isinstance(next_element, (dict, composite_tensor.CompositeTensor))):\r\n next_element = [next_element]\r\n if len(next_element) == 1:\r\n x, = next_element\r\n y = None\r\n sample_weights = None\r\n elif len(next_element) == 2:\r\n x, y = next_element\r\n sample_weights = None\r\n else:\r\n x, y, sample_weights = next_element\r\n\r\n # Validate that all the elements in x and y are of the same type and shape.\r\n dist_utils.validate_distributed_dataset_inputs(\r\n distribution_strategy_context.get_strategy(), x, y, sample_weights)\r\n return x, y, sample_weights\r\n\r\n\r\ndef _make_replica_execution_function(mode):\r\n \"\"\"A single step of the distributed execution on a replica.\"\"\"\r\n if mode == ModeKeys.TRAIN:\r\n func = train_on_batch\r\n elif mode == ModeKeys.TEST:\r\n func = test_on_batch\r\n else:\r\n def _predict_on_batch(model, x, y=None, sample_weights=None):\r\n del y, sample_weights\r\n return predict_on_batch(model, x)\r\n\r\n func = _predict_on_batch\r\n\r\n if mode != ModeKeys.PREDICT:\r\n # `reset_metrics` is set to False to maintain stateful metrics across\r\n # batch-level calls.\r\n func = functools.partial(func, reset_metrics=False)\r\n\r\n 
return func\r\n\r\n\r\ndef _prepare_model_with_inputs(model, dataset):\r\n \"\"\"Use the data from the adapter to config the model.\r\n\r\n Model need to be properly configured before training, eg build with inputs, or\r\n compile with inputs for subclass model.\r\n\r\n Args:\r\n model: a Keras model object.\r\n dataset: a eager dataset instance where the data will be extracted.\r\n \"\"\"\r\n if not model.inputs:\r\n inputs, target, _ = model._build_model_with_inputs(dataset, targets=None)\r\n else:\r\n inputs, target, _ = _get_input_from_iterator(iter(dataset))\r\n\r\n if not model._is_compiled and model.optimizer:\r\n model._compile_from_inputs(inputs, target, dataset, None)\r\n\r\n if target is not None:\r\n training_utils.prepare_sample_weight_modes(model._training_endpoints,\r\n model.sample_weight_mode)\r\n\r\n\r\ndef train_on_batch(\r\n model,\r\n x,\r\n y=None,\r\n sample_weight=None,\r\n class_weight=None,\r\n reset_metrics=True):\r\n \"\"\"Runs a single gradient update on a single batch of data.\r\n\r\n Arguments:\r\n model: The model to train.\r\n x: Input data. It could be:\r\n - A Numpy array (or array-like), or a list of arrays\r\n (in case the model has multiple inputs).\r\n - A TensorFlow tensor, or a list of tensors\r\n (in case the model has multiple inputs).\r\n - A dict mapping input names to the corresponding array/tensors,\r\n if the model has named inputs.\r\n - A `tf.data` dataset.\r\n y: Target data. Like the input data `x`, it could be either Numpy\r\n array(s) or TensorFlow tensor(s). It should be consistent with `x`\r\n (you cannot have Numpy inputs and tensor targets, or inversely). If\r\n `x` is a dataset `y` should not be specified\r\n (since targets will be obtained from the iterator).\r\n sample_weight: Optional array of the same length as x, containing\r\n weights to apply to the model's loss for each sample. In the case of\r\n temporal data, you can pass a 2D array with shape (samples,\r\n sequence_length), to apply a different weight to every timestep of\r\n every sample. In this case you should make sure to specify\r\n sample_weight_mode=\"temporal\" in compile(). This argument is not\r\n supported when `x` is a dataset.\r\n class_weight: Optional dictionary mapping class indices (integers) to a\r\n weight (float) to apply to the model's loss for the samples from this\r\n class during training. This can be useful to tell the model to \"pay\r\n more attention\" to samples from an under-represented class.\r\n reset_metrics: If `True`, the metrics returned will be only for this\r\n batch. If `False`, the metrics will be statefully accumulated across\r\n batches.\r\n\r\n Returns:\r\n Scalar training loss\r\n (if the model has a single output and no metrics)\r\n or list of scalars (if the model has multiple outputs\r\n and/or metrics). 
The attribute `model.metrics_names` will give you\r\n the display labels for the scalar outputs.\r\n\r\n Raises:\r\n ValueError: In case of invalid user-provided arguments.\r\n \"\"\"\r\n model._assert_compile_was_called()\r\n\r\n # TODO(scottzhu): Standardization should happen in the data handlers,\r\n ## not on a per batch basis in the *_on_batch methods\r\n # Validate and standardize user data.\r\n x, y, sample_weights = model._standardize_user_data(\r\n x, y, sample_weight=sample_weight, class_weight=class_weight,\r\n extract_tensors_from_dataset=True)\r\n batch_size = array_ops.shape(nest.flatten(x, expand_composites=True)[0])[0]\r\n # If `model._distribution_strategy` is True, then we are in a replica context\r\n # at this point because of the check above. `train_on_batch` is being run\r\n # for each replica by `model._distribution_strategy` and the same code path\r\n # as Eager is expected to be taken.\r\n outputs = training_eager.train_on_batch(\r\n model,\r\n x,\r\n y,\r\n sample_weights=sample_weights,\r\n output_loss_metrics=model._output_loss_metrics)\r\n\r\n if reset_metrics:\r\n model.reset_metrics()\r\n\r\n outputs['batch_size'] = math_ops.cast(batch_size, dtypes.int64)\r\n return outputs\r\n\r\n\r\ndef test_on_batch(model, x, y=None, sample_weight=None, reset_metrics=True):\r\n \"\"\"Test the model on a single batch of samples.\r\n\r\n Arguments:\r\n model: The model to test.\r\n x: Input data. It could be:\r\n - A Numpy array (or array-like), or a list of arrays\r\n (in case the model has multiple inputs).\r\n - A TensorFlow tensor, or a list of tensors\r\n (in case the model has multiple inputs).\r\n - A dict mapping input names to the corresponding array/tensors,\r\n if the model has named inputs.\r\n - A `tf.data` dataset.\r\n y: Target data. Like the input data `x`,\r\n it could be either Numpy array(s) or TensorFlow tensor(s).\r\n It should be consistent with `x` (you cannot have Numpy inputs and\r\n tensor targets, or inversely). If `x` is a dataset,\r\n `y` should not be specified\r\n (since targets will be obtained from the iterator).\r\n sample_weight: Optional array of the same length as x, containing\r\n weights to apply to the model's loss for each sample.\r\n In the case of temporal data, you can pass a 2D array\r\n with shape (samples, sequence_length),\r\n to apply a different weight to every timestep of every sample.\r\n In this case you should make sure to specify\r\n sample_weight_mode=\"temporal\" in compile(). This argument is not\r\n supported when `x` is a dataset.\r\n reset_metrics: If `True`, the metrics returned will be only for this\r\n batch. If `False`, the metrics will be statefully accumulated across\r\n batches.\r\n\r\n Returns:\r\n Scalar test loss (if the model has a single output and no metrics)\r\n or list of scalars (if the model has multiple outputs\r\n and/or metrics). 
The attribute `model.metrics_names` will give you\r\n the display labels for the scalar outputs.\r\n\r\n Raises:\r\n ValueError: In case of invalid user-provided arguments.\r\n \"\"\"\r\n model._assert_compile_was_called()\r\n\r\n # TODO(scottzhu): Standardization should happen in the data handlers,\r\n ## not on a per batch basis in the *_on_batch methods\r\n # Validate and standardize user data.\r\n x, y, sample_weights = model._standardize_user_data(\r\n x, y, sample_weight=sample_weight, extract_tensors_from_dataset=True)\r\n\r\n batch_size = array_ops.shape(nest.flatten(x, expand_composites=True)[0])[0]\r\n outputs = training_eager.test_on_batch(\r\n model,\r\n x,\r\n y,\r\n sample_weights=sample_weights,\r\n output_loss_metrics=model._output_loss_metrics)\r\n\r\n if reset_metrics:\r\n model.reset_metrics()\r\n\r\n outputs['batch_size'] = math_ops.cast(batch_size, dtypes.int64)\r\n return outputs\r\n\r\n\r\ndef predict_on_batch(model, x):\r\n \"\"\"Returns predictions for a single batch of samples.\r\n\r\n Arguments:\r\n model: The model to predict with.\r\n x: Input data. It could be:\r\n - A Numpy array (or array-like), or a list of arrays\r\n (in case the model has multiple inputs).\r\n - A TensorFlow tensor, or a list of tensors\r\n (in case the model has multiple inputs).\r\n - A `tf.data` dataset.\r\n\r\n Returns:\r\n Numpy array(s) of predictions.\r\n\r\n Raises:\r\n ValueError: In case of mismatch between given number of inputs and\r\n expectations of the model.\r\n \"\"\"\r\n # TODO(scottzhu): Standardization should happen in the data handlers,\r\n ## not on a per batch basis in the *_on_batch methods\r\n # Validate and standardize user data.\r\n inputs, _, _ = model._standardize_user_data(\r\n x, extract_tensors_from_dataset=True)\r\n\r\n # If `model._distribution_strategy` is True, then we are in a replica context\r\n # at this point.\r\n inputs = training_utils.cast_if_floating_dtype(inputs)\r\n if isinstance(inputs, collections.Sequence):\r\n # Unwrap lists with only one input, as we do when training on batch\r\n if len(inputs) == 1:\r\n inputs = inputs[0]\r\n\r\n with backend.eager_learning_phase_scope(0):\r\n return model(inputs) # pylint: disable=not-callable\r\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"tensorboard_logging provides logging that is also written to the events file.\r\n\r\nAny messages logged via this module will be logged both via the platform logging\r\nmechanism and to the SummaryWriter set via `set_summary_writer`. 
This is useful\r\nfor logging messages that you might want to be visible from inside TensorBoard\r\nor that should be permanently associated with the training session.\r\n\r\nYou can use this just like the logging module:\r\n\r\n>>> tensorboard_logging.set_summary_writer(summary_writer)\r\n>>> tensorboard_logging.info(\"my %s\", \"message\")\r\n>>> tensorboard_logging.log(tensorboard_logging.WARN, \"something\")\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport time\r\n\r\nfrom tensorflow.core.util import event_pb2\r\nfrom tensorflow.python.platform import tf_logging as logging\r\n\r\nDEBUG = 'DEBUG'\r\nINFO = 'INFO'\r\nWARN = 'WARN'\r\nERROR = 'ERROR'\r\nFATAL = 'FATAL'\r\n\r\n# Messages with levels below this verbosity will not be logged.\r\n_verbosity = WARN\r\n\r\n# A value meaning 'not set yet' so we can use None to mean 'user actively told\r\n# us they don't want a SummaryWriter'.\r\n_sentinel_summary_writer = object()\r\n\r\n# The SummaryWriter instance to use when logging, or None to not log, or\r\n# _sentinel_summary_writer to indicate that the user hasn't called\r\n# set_summary_writer yet.\r\n_summary_writer = _sentinel_summary_writer\r\n\r\n# Map from the tensorboard_logging logging enum values to the proto's enum\r\n# values.\r\n_LEVEL_PROTO_MAP = {\r\n DEBUG: event_pb2.LogMessage.DEBUGGING,\r\n INFO: event_pb2.LogMessage.INFO,\r\n WARN: event_pb2.LogMessage.WARN,\r\n ERROR: event_pb2.LogMessage.ERROR,\r\n FATAL: event_pb2.LogMessage.FATAL,\r\n}\r\n\r\n# Map from the tensorboard_logging module levels to the logging module levels.\r\n_PLATFORM_LOGGING_LEVEL_MAP = {\r\n DEBUG: logging.DEBUG,\r\n INFO: logging.INFO,\r\n WARN: logging.WARN,\r\n ERROR: logging.ERROR,\r\n FATAL: logging.FATAL\r\n}\r\n\r\n\r\ndef get_verbosity():\r\n return _verbosity\r\n\r\n\r\ndef set_verbosity(verbosity):\r\n _check_verbosity(verbosity)\r\n global _verbosity\r\n _verbosity = verbosity\r\n\r\n\r\ndef _check_verbosity(verbosity):\r\n if verbosity not in _LEVEL_PROTO_MAP:\r\n raise ValueError('Level %s is not a valid tensorboard_logging level' %\r\n verbosity)\r\n\r\n\r\ndef set_summary_writer(summary_writer):\r\n \"\"\"Sets the summary writer that events will be logged to.\r\n\r\n Calling any logging methods inside this module without calling this method\r\n will fail. If you don't want to log, call `set_summary_writer(None)`.\r\n\r\n Args:\r\n summary_writer: Either a SummaryWriter or None. None will cause messages not\r\n to be logged to any SummaryWriter, but they will still be passed to the\r\n platform logging module.\r\n \"\"\"\r\n global _summary_writer\r\n _summary_writer = summary_writer\r\n\r\n\r\ndef _clear_summary_writer():\r\n \"\"\"Makes all subsequent log invocations error.\r\n\r\n This is only used for testing. If you want to disable TensorBoard logging,\r\n call `set_summary_writer(None)` instead.\r\n \"\"\"\r\n global _summary_writer\r\n _summary_writer = _sentinel_summary_writer\r\n\r\n\r\ndef log(level, message, *args):\r\n \"\"\"Conditionally logs `message % args` at the level `level`.\r\n\r\n Note that tensorboard_logging verbosity and logging verbosity are separate;\r\n the message will always be passed through to the logging module regardless of\r\n whether it passes the tensorboard_logging verbosity check.\r\n\r\n Args:\r\n level: The verbosity level to use. 
Must be one of\r\n tensorboard_logging.{DEBUG, INFO, WARN, ERROR, FATAL}.\r\n message: The message template to use.\r\n *args: Arguments to interpolate to the message template, if any.\r\n\r\n Raises:\r\n ValueError: If `level` is not a valid logging level.\r\n RuntimeError: If the `SummaryWriter` to use has not been set.\r\n \"\"\"\r\n if _summary_writer is _sentinel_summary_writer:\r\n raise RuntimeError('Must call set_summary_writer before doing any '\r\n 'logging from tensorboard_logging')\r\n _check_verbosity(level)\r\n proto_level = _LEVEL_PROTO_MAP[level]\r\n if proto_level >= _LEVEL_PROTO_MAP[_verbosity]:\r\n log_message = event_pb2.LogMessage(level=proto_level,\r\n message=message % args)\r\n event = event_pb2.Event(wall_time=time.time(), log_message=log_message)\r\n\r\n if _summary_writer:\r\n _summary_writer.add_event(event)\r\n\r\n logging.log(_PLATFORM_LOGGING_LEVEL_MAP[level], message, *args)\r\n\r\n\r\ndef debug(message, *args):\r\n log(DEBUG, message, *args)\r\n\r\n\r\ndef info(message, *args):\r\n log(INFO, message, *args)\r\n\r\n\r\ndef warn(message, *args):\r\n log(WARN, message, *args)\r\n\r\n\r\ndef error(message, *args):\r\n log(ERROR, message, *args)\r\n\r\n\r\ndef fatal(message, *args):\r\n log(FATAL, message, *args)\r\n" ]
[ [ "tensorflow.python.ops.array_ops.pad", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.contrib.timeseries.python.timeseries.math_utils.variable_covariance_matrix", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.ops.array_ops.concat", "tensorflow.contrib.timeseries.python.timeseries.state_space_models.state_space_model.StateSpaceModelConfiguration", "tensorflow.python.ops.linalg_ops.eye" ], [ "numpy.ones", "numpy.fft.fftshift", "tensorflow.python.ops.spectral_ops_test_util.fft_kernel_label_map", "tensorflow.python.platform.test.is_gpu_available", "numpy.fft.rfft2", "numpy.random.rand", "tensorflow.python.ops.signal.fft_ops.fftshift", "tensorflow.python.ops.signal.fft_ops.ifftshift", "tensorflow.python.framework.dtypes.as_dtype", "numpy.random.uniform", "numpy.fft.fft2", "numpy.zeros", "numpy.power", "numpy.prod", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.fft.irfft2", "tensorflow.core.protobuf.config_pb2.ConfigProto", "numpy.fft.ifft2", "tensorflow.python.ops.math_ops.complex", "numpy.fft.ifftshift", "tensorflow.python.platform.test.main", "tensorflow.python.ops.math_ops.conj" ], [ "tensorflow.python.data.experimental.ops.optimization.assert_next", "tensorflow.python.platform.test.main", "tensorflow.python.data.ops.dataset_ops.Options", "tensorflow.python.data.experimental.ops.stats_aggregator.StatsAggregator", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors" ], [ "tensorflow.python.util.deprecation.silence" ], [ "numpy.eye", "numpy.transpose", "numpy.linalg.inv", "tensorflow.python.ops.math_ops.matmul", "numpy.linalg.det", "numpy.random.RandomState", "tensorflow.python.platform.test.main", "numpy.arange", "tensorflow.python.ops.array_ops.matrix_band_part", "scipy.stats.multivariate_normal" ], [ "tensorflow.python.keras.initializers.ConstantV2", "tensorflow.python.keras.initializers.lecun_normalV2", "tensorflow.python.keras.backend.get_value", "tensorflow.python.keras.initializers.TruncatedNormalV2", "tensorflow.python.keras.initializers.OrthogonalV2", "tensorflow.python.keras.initializers.he_uniformV2", "tensorflow.python.keras.initializers.GlorotUniformV2", "tensorflow.python.ops.init_ops._compute_fans", "tensorflow.python.keras.Model", "tensorflow.python.keras.initializers.RandomUniformV2", "tensorflow.python.keras.Input", "tensorflow.python.keras.initializers.OnesV2", "tensorflow.python.tf2.enable", "tensorflow.python.keras.initializers.RandomNormalV2", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.keras.initializers.ZerosV2", "tensorflow.python.keras.initializers.deserialize", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.initializers.IdentityV2", "tensorflow.python.keras.initializers.GlorotNormalV2", "tensorflow.python.keras.initializers.he_normalV2", "tensorflow.python.platform.test.main", "tensorflow.python.keras.initializers.lecun_uniformV2", "numpy.sqrt", "tensorflow.python.keras.initializers.get" ], [ "numpy.ones", "tensorflow.contrib.rnn.python.kernel_tests.benchmarking.seconds_per_run", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.contrib.rnn.python.ops.lstm_ops.LSTMBlockFusedCell", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.rnn.static_rnn", "tensorflow.python.framework.constant_op.constant", "tensorflow.contrib.rnn.python.kernel_tests.benchmarking.dict_product", 
"tensorflow.python.ops.init_ops.random_uniform_initializer", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.gen_bitwise_ops.bitwise_and", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.client.session.Session", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.rnn.dynamic_rnn", "tensorflow.contrib.rnn.python.ops.lstm_ops.LSTMBlockCell", "tensorflow.python.ops.gen_array_ops.bitcast", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.contrib.rnn.python.kernel_tests.benchmarking.device", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.variable_scope.get_variable_scope", "numpy.random.randn", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.rnn_cell.LSTMCell", "tensorflow.python.ops.rnn_cell.BasicLSTMCell", "numpy.array" ], [ "tensorflow.python.keras.engine.training_eager.test_on_batch", "tensorflow.python.keras.engine.training_utils.prepare_sample_weight_modes", "tensorflow.python.keras.distribute.distributed_training_utils.set_distributed_function", "tensorflow.python.keras.engine.training_eager.train_on_batch", "tensorflow.python.distribute.distribution_strategy_context.get_strategy", "tensorflow.python.util.nest.flatten", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.keras.engine.training_utils.cast_if_floating_dtype", "tensorflow.python.keras.distribute.distributed_training_utils.unwrap_output_dict", "tensorflow.python.keras.backend.eager_learning_phase_scope", "tensorflow.python.eager.def_function.function", "tensorflow.python.keras.distribute.distributed_training_utils.get_distributed_function", "tensorflow.python.keras.engine.training_utils.ModelInputs", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.framework.tensor_util.is_tensor" ], [ "tensorflow.core.util.event_pb2.LogMessage", "tensorflow.python.platform.tf_logging.log" ] ]
Adarsh2910/katib
[ "cd095d6a33401cfddee8188943b60cd12c950c33" ]
[ "pkg/suggestion/v1beta1/nas/enas/service.py" ]
[ "import logging\nfrom logging import getLogger, StreamHandler, INFO\nimport json\nimport os\nimport tensorflow as tf\nimport grpc\n\nfrom pkg.apis.manager.v1beta1.python import api_pb2\nfrom pkg.apis.manager.v1beta1.python import api_pb2_grpc\nfrom pkg.suggestion.v1beta1.nas.enas.Controller import Controller\nfrom pkg.suggestion.v1beta1.nas.enas.Operation import SearchSpace\nfrom pkg.suggestion.v1beta1.nas.enas.AlgorithmSettings import (\n parseAlgorithmSettings, algorithmSettingsValidator, enableNoneSettingsList)\nfrom pkg.suggestion.v1beta1.internal.base_health_service import HealthServicer\n\n\nclass EnasExperiment:\n def __init__(self, request, logger):\n self.logger = logger\n self.experiment_name = request.experiment.name\n self.experiment = request.experiment\n self.num_trials = 1\n self.tf_graph = tf.Graph()\n self.ctrl_cache_file = \"ctrl_cache/{}.ckpt\".format(\n self.experiment_name)\n self.suggestion_step = 0\n self.algorithm_settings = None\n self.controller = None\n self.num_layers = None\n self.input_sizes = None\n self.output_sizes = None\n self.num_operations = None\n self.search_space = None\n self.opt_direction = None\n self.objective_name = None\n self.logger.info(\"-\" * 100 + \"\\nSetting Up Suggestion for Experiment {}\\n\".format(\n self.experiment_name) + \"-\" * 100)\n self._get_experiment_param()\n self._setup_controller()\n self.logger.info(\">>> Suggestion for Experiment {} has been initialized.\\n\".format(\n self.experiment_name))\n\n def _get_experiment_param(self):\n # this function need to\n # 1) get the number of layers\n # 2) get the I/O size\n # 3) get the available operations\n # 4) get the optimization direction (i.e. minimize or maximize)\n # 5) get the objective name\n # 6) get the algorithm settings\n\n # Get Search Space\n self.opt_direction = self.experiment.spec.objective.type\n self.objective_name = self.experiment.spec.objective.objective_metric_name\n\n nas_config = self.experiment.spec.nas_config\n\n graph_config = nas_config.graph_config\n self.num_layers = int(graph_config.num_layers)\n self.input_sizes = list(map(int, graph_config.input_sizes))\n self.output_sizes = list(map(int, graph_config.output_sizes))\n\n search_space_raw = nas_config.operations\n search_space_object = SearchSpace(search_space_raw)\n self.search_space = search_space_object.search_space\n self.num_operations = search_space_object.num_operations\n\n self.print_search_space()\n\n # Get Experiment Algorithm Settings\n settings_raw = self.experiment.spec.algorithm.algorithm_settings\n self.algorithm_settings = parseAlgorithmSettings(settings_raw)\n\n self.print_algorithm_settings()\n\n def _setup_controller(self):\n\n with self.tf_graph.as_default():\n\n self.controller = Controller(\n num_layers=self.num_layers,\n num_operations=self.num_operations,\n controller_hidden_size=self.algorithm_settings['controller_hidden_size'],\n controller_temperature=self.algorithm_settings['controller_temperature'],\n controller_tanh_const=self.algorithm_settings['controller_tanh_const'],\n controller_entropy_weight=self.algorithm_settings['controller_entropy_weight'],\n controller_baseline_decay=self.algorithm_settings['controller_baseline_decay'],\n controller_learning_rate=self.algorithm_settings[\"controller_learning_rate\"],\n controller_skip_target=self.algorithm_settings['controller_skip_target'],\n controller_skip_weight=self.algorithm_settings['controller_skip_weight'],\n controller_name=\"Ctrl_\" + self.experiment_name,\n logger=self.logger)\n\n 
self.controller.build_trainer()\n\n def print_search_space(self):\n if self.search_space is None:\n self.logger.warning(\n \"Error! The Suggestion has not yet been initialized!\")\n return\n\n self.logger.info(\n \">>> Search Space for Experiment {}\".format(self.experiment_name))\n for opt in self.search_space:\n opt.print_op(self.logger)\n self.logger.info(\n \"There are {} operations in total.\\n\".format(self.num_operations))\n\n def print_algorithm_settings(self):\n if self.algorithm_settings is None:\n self.logger.warning(\n \"Error! The Suggestion has not yet been initialized!\")\n return\n\n self.logger.info(\">>> Parameters of LSTM Controller for Experiment {}\\n\".format(\n self.experiment_name))\n for spec in self.algorithm_settings:\n if len(spec) > 22:\n self.logger.info(\"{}:\\t{}\".format(\n spec, self.algorithm_settings[spec]))\n else:\n self.logger.info(\"{}:\\t\\t{}\".format(\n spec, self.algorithm_settings[spec]))\n\n self.logger.info(\"\")\n\n\nclass EnasService(api_pb2_grpc.SuggestionServicer, HealthServicer):\n def __init__(self, logger=None):\n super(EnasService, self).__init__()\n self.is_first_run = True\n self.experiment = None\n if logger == None:\n self.logger = getLogger(__name__)\n FORMAT = '%(asctime)-15s Experiment %(experiment_name)s %(message)s'\n logging.basicConfig(format=FORMAT)\n handler = StreamHandler()\n handler.setLevel(INFO)\n self.logger.setLevel(INFO)\n self.logger.addHandler(handler)\n self.logger.propagate = False\n else:\n self.logger = logger\n\n if not os.path.exists(\"ctrl_cache/\"):\n os.makedirs(\"ctrl_cache/\")\n\n def ValidateAlgorithmSettings(self, request, context):\n self.logger.info(\"Validate Algorithm Settings start\")\n graph_config = request.experiment.spec.nas_config.graph_config\n\n # Validate GraphConfig\n # Check InputSize\n if not graph_config.input_sizes:\n return self.SetValidateContextError(context, \"Missing InputSizes in GraphConfig:\\n{}\".format(graph_config))\n\n # Check OutputSize\n if not graph_config.output_sizes:\n return self.SetValidateContextError(context, \"Missing OutputSizes in GraphConfig:\\n{}\".format(graph_config))\n\n # Check NumLayers\n if not graph_config.num_layers:\n return self.SetValidateContextError(context, \"Missing NumLayers in GraphConfig:\\n{}\".format(graph_config))\n\n # Validate each operation\n operations_list = list(\n request.experiment.spec.nas_config.operations.operation)\n for operation in operations_list:\n\n # Check OperationType\n if not operation.operation_type:\n return self.SetValidateContextError(context, \"Missing operationType in Operation:\\n{}\".format(operation))\n\n # Check ParameterConfigs\n if not operation.parameter_specs.parameters:\n return self.SetValidateContextError(context, \"Missing ParameterConfigs in Operation:\\n{}\".format(operation))\n\n # Validate each ParameterConfig in Operation\n parameters_list = list(operation.parameter_specs.parameters)\n for parameter in parameters_list:\n\n # Check Name\n if not parameter.name:\n return self.SetValidateContextError(context, \"Missing Name in ParameterConfig:\\n{}\".format(parameter))\n\n # Check ParameterType\n if not parameter.parameter_type:\n return self.SetValidateContextError(context, \"Missing ParameterType in ParameterConfig:\\n{}\".format(parameter))\n\n # Check List in Categorical or Discrete Type\n if parameter.parameter_type == api_pb2.CATEGORICAL or parameter.parameter_type == api_pb2.DISCRETE:\n if not parameter.feasible_space.list:\n return self.SetValidateContextError(context, \"Missing List in 
ParameterConfig.feasibleSpace:\\n{}\".format(parameter))\n\n # Check Max, Min, Step in Int or Double Type\n elif parameter.parameter_type == api_pb2.INT or parameter.parameter_type == api_pb2.DOUBLE:\n if not parameter.feasible_space.min and not parameter.feasible_space.max:\n return self.SetValidateContextError(context, \"Missing Max and Min in ParameterConfig.feasibleSpace:\\n{}\".format(parameter))\n\n if parameter.parameter_type == api_pb2.DOUBLE and (not parameter.feasible_space.step or float(parameter.feasible_space.step) <= 0):\n return self.SetValidateContextError(context, \"Step parameter should be > 0 in ParameterConfig.feasibleSpace:\\n{}\".format(parameter))\n\n # Validate Algorithm Settings\n settings_raw = request.experiment.spec.algorithm.algorithm_settings\n for setting in settings_raw:\n if setting.name in algorithmSettingsValidator.keys():\n if setting.name in enableNoneSettingsList and setting.value == \"None\":\n continue\n setting_type = algorithmSettingsValidator[setting.name][0]\n setting_range = algorithmSettingsValidator[setting.name][1]\n try:\n converted_value = setting_type(setting.value)\n except:\n return self.SetValidateContextError(context, \"Algorithm Setting {} must be {} type\".format(setting.name, setting_type.__name__))\n\n if setting_type == float:\n if converted_value <= setting_range[0] or (setting_range[1] != 'inf' and converted_value > setting_range[1]):\n return self.SetValidateContextError(context, \"Algorithm Setting {}: {} with {} type must be in range ({}, {}]\".format(\n setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]\n ))\n\n elif converted_value < setting_range[0]:\n return self.SetValidateContextError(context, \"Algorithm Setting {}: {} with {} type must be in range [{}, {})\".format(\n setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]\n ))\n else:\n return self.SetValidateContextError(context, \"Unknown Algorithm Setting name: {}\".format(setting.name))\n\n self.logger.info(\"All Experiment Settings are Valid\")\n return api_pb2.ValidateAlgorithmSettingsReply()\n\n def SetValidateContextError(self, context, error_message):\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n context.set_details(error_message)\n self.logger.info(error_message)\n return api_pb2.ValidateAlgorithmSettingsReply()\n\n def GetSuggestions(self, request, context):\n if self.is_first_run:\n self.experiment = EnasExperiment(request, self.logger)\n experiment = self.experiment\n if request.request_number > 0:\n experiment.num_trials = request.request_number\n self.logger.info(\"-\" * 100 + \"\\nSuggestion Step {} for Experiment {}\\n\".format(\n experiment.suggestion_step, experiment.experiment_name) + \"-\" * 100)\n\n self.logger.info(\"\")\n self.logger.info(\">>> RequestNumber:\\t\\t{}\".format(experiment.num_trials))\n self.logger.info(\"\")\n\n with experiment.tf_graph.as_default():\n saver = tf.compat.v1.train.Saver()\n ctrl = experiment.controller\n\n controller_ops = {\n \"loss\": ctrl.loss,\n \"entropy\": ctrl.sample_entropy,\n \"grad_norm\": ctrl.grad_norm,\n \"baseline\": ctrl.baseline,\n \"skip_rate\": ctrl.skip_rate,\n \"train_op\": ctrl.train_op,\n \"train_step\": ctrl.train_step,\n \"sample_arc\": ctrl.sample_arc,\n \"child_val_accuracy\": ctrl.child_val_accuracy,\n }\n\n if self.is_first_run:\n self.logger.info(\">>> First time running suggestion for {}. 
Random architecture will be given.\".format(\n experiment.experiment_name))\n with tf.compat.v1.Session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n candidates = list()\n for _ in range(experiment.num_trials):\n candidates.append(\n sess.run(controller_ops[\"sample_arc\"]))\n\n # TODO: will use PVC to store the checkpoint to protect against unexpected suggestion pod restart\n saver.save(sess, experiment.ctrl_cache_file)\n\n self.is_first_run = False\n\n else:\n with tf.compat.v1.Session() as sess:\n saver.restore(sess, experiment.ctrl_cache_file)\n\n result = self.GetEvaluationResult(request.trials)\n\n # TODO: (andreyvelich) I deleted this part, should it be handle by controller?\n # Sometimes training container may fail and GetEvaluationResult() will return None\n # In this case, the Suggestion will:\n # 1. Firstly try to respawn the previous trials after waiting for RESPAWN_SLEEP seconds\n # 2. If respawning the trials for RESPAWN_LIMIT times still cannot collect valid results,\n # then fail the task because it may indicate that the training container has errors.\n if result is None:\n self.logger.warning(\n \">>> Suggestion has spawned trials, but they all failed.\")\n self.logger.warning(\n \">>> Please check whether the training container is correctly implemented\")\n self.logger.info(\">>> Experiment {} failed\".format(\n experiment.experiment_name))\n return []\n\n # This LSTM network is designed to maximize the metrics\n # However, if the user wants to minimize the metrics, we can take the negative of the result\n\n if experiment.opt_direction == api_pb2.MINIMIZE:\n result = -result\n\n self.logger.info(\">>> Suggestion updated. LSTM Controller Training\\n\")\n log_every = experiment.algorithm_settings[\"controller_log_every_steps\"]\n for ctrl_step in range(1, experiment.algorithm_settings[\"controller_train_steps\"]+1):\n run_ops = [\n controller_ops[\"loss\"],\n controller_ops[\"entropy\"],\n controller_ops[\"grad_norm\"],\n controller_ops[\"baseline\"],\n controller_ops[\"skip_rate\"],\n controller_ops[\"train_op\"]\n ]\n\n loss, entropy, grad_norm, baseline, skip_rate, _ = sess.run(\n fetches=run_ops,\n feed_dict={controller_ops[\"child_val_accuracy\"]: result})\n\n controller_step = sess.run(controller_ops[\"train_step\"])\n if ctrl_step % log_every == 0:\n log_string = \"\"\n log_string += \"Controller Step: {} - \".format(controller_step)\n log_string += \"Loss: {:.4f} - \".format(loss)\n log_string += \"Entropy: {:.9} - \".format(entropy)\n log_string += \"Gradient Norm: {:.7f} - \".format(grad_norm)\n log_string += \"Baseline={:.4f} - \".format(baseline)\n log_string += \"Skip Rate={:.4f}\".format(skip_rate)\n self.logger.info(log_string)\n\n candidates = list()\n for _ in range(experiment.num_trials):\n candidates.append(\n sess.run(controller_ops[\"sample_arc\"]))\n\n saver.save(sess, experiment.ctrl_cache_file)\n\n organized_candidates = list()\n parameter_assignments = list()\n\n for i in range(experiment.num_trials):\n arc = candidates[i].tolist()\n organized_arc = [0 for _ in range(experiment.num_layers)]\n record = 0\n for l in range(experiment.num_layers):\n organized_arc[l] = arc[record: record + l + 1]\n record += l + 1\n organized_candidates.append(organized_arc)\n\n nn_config = dict()\n nn_config['num_layers'] = experiment.num_layers\n nn_config['input_sizes'] = experiment.input_sizes\n nn_config['output_sizes'] = experiment.output_sizes\n nn_config['embedding'] = dict()\n for l in range(experiment.num_layers):\n opt = 
organized_arc[l][0]\n nn_config['embedding'][opt] = experiment.search_space[opt].get_dict()\n\n organized_arc_json = json.dumps(organized_arc)\n nn_config_json = json.dumps(nn_config)\n\n organized_arc_str = str(organized_arc_json).replace('\\\"', '\\'')\n nn_config_str = str(nn_config_json).replace('\\\"', '\\'')\n\n self.logger.info(\n \"\\n>>> New Neural Network Architecture Candidate #{} (internal representation):\".format(i))\n self.logger.info(organized_arc_json)\n self.logger.info(\"\\n>>> Corresponding Seach Space Description:\")\n self.logger.info(nn_config_str)\n\n parameter_assignments.append(\n api_pb2.GetSuggestionsReply.ParameterAssignments(\n assignments=[\n api_pb2.ParameterAssignment(\n name=\"architecture\",\n value=organized_arc_str\n ),\n api_pb2.ParameterAssignment(\n name=\"nn_config\",\n value=nn_config_str\n )\n ]\n )\n )\n\n self.logger.info(\"\")\n self.logger.info(\">>> {} Trials were created for Experiment {}\".format(\n experiment.num_trials, experiment.experiment_name))\n self.logger.info(\"\")\n\n experiment.suggestion_step += 1\n\n return api_pb2.GetSuggestionsReply(parameter_assignments=parameter_assignments)\n\n def GetEvaluationResult(self, trials_list):\n completed_trials = dict()\n failed_trials = []\n for t in trials_list:\n if t.status.condition == api_pb2.TrialStatus.TrialConditionType.SUCCEEDED:\n target_value = None\n for metric in t.status.observation.metrics:\n if metric.name == t.spec.objective.objective_metric_name:\n target_value = metric.value\n break\n\n # Take only the first metric value\n # In current cifar-10 training container this value is the latest\n completed_trials[t.name] = float(target_value)\n\n if t.status.condition == api_pb2.TrialStatus.TrialConditionType.FAILED:\n failed_trials.append(t.name)\n\n n_completed = len(completed_trials)\n self.logger.info(\">>> By now: {} Trials succeeded, {} Trials failed\".format(\n n_completed, len(failed_trials)))\n for tname in completed_trials:\n self.logger.info(\"Trial: {}, Value: {}\".format(\n tname, completed_trials[tname]))\n for tname in failed_trials:\n self.logger.info(\"Trial: {} was failed\".format(tname))\n\n if n_completed > 0:\n avg_metrics = sum(completed_trials.values()) / n_completed\n self.logger.info(\"The average is {}\\n\".format(avg_metrics))\n\n return avg_metrics\n" ]
[ [ "tensorflow.Graph", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.global_variables_initializer" ] ]
raimonpv/NeuroKit
[ "cb37d83ee20d6a13a91c4848aa435f41e979e203" ]
[ "tests/tests_hrv.py" ]
[ "import numpy as np\n\nimport neurokit2 as nk\n\n\ndef test_hrv_time():\n ecg_slow = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=70, random_state=42)\n ecg_fast = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)\n\n _, peaks_slow = nk.ecg_process(ecg_slow, sampling_rate=1000)\n _, peaks_fast = nk.ecg_process(ecg_fast, sampling_rate=1000)\n\n hrv_slow = nk.hrv_time(peaks_slow, sampling_rate=1000)\n hrv_fast = nk.hrv_time(peaks_fast, sampling_rate=1000)\n\n assert np.all(hrv_fast[\"HRV_RMSSD\"] < hrv_slow[\"HRV_RMSSD\"])\n assert np.all(hrv_fast[\"HRV_MeanNN\"] < hrv_slow[\"HRV_MeanNN\"])\n assert np.all(hrv_fast[\"HRV_SDNN\"] < hrv_slow[\"HRV_SDNN\"])\n assert np.all(hrv_fast[\"HRV_CVNN\"] < hrv_slow[\"HRV_CVNN\"])\n assert np.all(hrv_fast[\"HRV_CVSD\"] < hrv_slow[\"HRV_CVSD\"])\n assert np.all(hrv_fast[\"HRV_MedianNN\"] < hrv_slow[\"HRV_MedianNN\"])\n assert np.all(hrv_fast[\"HRV_MadNN\"] < hrv_slow[\"HRV_MadNN\"])\n assert np.all(hrv_fast[\"HRV_MCVNN\"] < hrv_slow[\"HRV_MCVNN\"])\n assert np.all(hrv_fast[\"HRV_pNN50\"] == hrv_slow[\"HRV_pNN50\"])\n assert np.all(hrv_fast[\"HRV_pNN20\"] < hrv_slow[\"HRV_pNN20\"])\n assert np.all(hrv_fast[\"HRV_TINN\"] < hrv_slow[\"HRV_TINN\"])\n assert np.all(hrv_fast[\"HRV_HTI\"] > hrv_slow[\"HRV_HTI\"])\n\n\ndef test_hrv_frequency():\n # Test frequency domain\n ecg1 = nk.ecg_simulate(duration=60, sampling_rate=2000, heart_rate=70, random_state=42)\n _, peaks1 = nk.ecg_process(ecg1, sampling_rate=2000)\n hrv1 = nk.hrv_frequency(peaks1, sampling_rate=2000)\n\n ecg2 = nk.signal_resample(ecg1, sampling_rate=2000, desired_sampling_rate=500)\n _, peaks2 = nk.ecg_process(ecg2, sampling_rate=500)\n hrv2 = nk.hrv_frequency(peaks2, sampling_rate=500)\n\n assert np.allclose(hrv1[\"HRV_HF\"] - hrv2[\"HRV_HF\"], 0, atol=1.5)\n assert np.isnan(hrv1[\"HRV_LF\"][0])\n assert np.isnan(hrv2[\"HRV_LF\"][0])\n assert np.isnan(hrv1[\"HRV_VLF\"][0])\n assert np.isnan(hrv2[\"HRV_LF\"][0])\n\n\ndef test_hrv():\n\n ecg = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)\n\n _, peaks = nk.ecg_process(ecg, sampling_rate=1000)\n\n ecg_hrv = nk.hrv(peaks, sampling_rate=1000)\n\n columns = ['HRV_RMSSD', 'HRV_MeanNN', 'HRV_SDNN', 'HRV_SDSD', 'HRV_CVNN',\n 'HRV_CVSD', 'HRV_MedianNN', 'HRV_MadNN', 'HRV_MCVNN', 'HRV_IQRNN',\n 'HRV_pNN50', 'HRV_pNN20', 'HRV_TINN', 'HRV_HTI', 'HRV_ULF',\n 'HRV_VLF', 'HRV_LF', 'HRV_HF', 'HRV_VHF', 'HRV_LFHF', 'HRV_LFn',\n 'HRV_HFn', 'HRV_LnHF', 'HRV_SD1', 'HRV_SD2', 'HRV_SD1SD2', 'HRV_S',\n 'HRV_CSI', 'HRV_CVI', 'HRV_CSI_Modified', 'HRV_PIP', 'HRV_IALS',\n 'HRV_PSS', 'HRV_PAS', 'HRV_GI', 'HRV_SI', 'HRV_AI', 'HRV_PI',\n 'HRV_C1d', 'HRV_C1a', 'HRV_SD1d',\n 'HRV_SD1a', 'HRV_C2d',\n 'HRV_C2a', 'HRV_SD2d', 'HRV_SD2a',\n 'HRV_Cd', 'HRV_Ca', 'HRV_SDNNd',\n 'HRV_SDNNa', 'HRV_ApEn', 'HRV_SampEn']\n\n assert all(elem in np.array(ecg_hrv.columns.values, dtype=object) for elem\n in columns)" ]
[ [ "numpy.array", "numpy.allclose", "numpy.all", "numpy.isnan" ] ]
gumpy-bci/gumpy
[ "c51ee75ddf1eaa58813b493282014da6f31f5591" ]
[ "gumpy/split.py" ]
[ "import sklearn.model_selection\nimport numpy as np\nfrom sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit, cross_val_score, StratifiedKFold\n\n\ndef normal(X, labels, test_size):\n \"\"\"Split a dataset into training and test parts.\n Args:\n X (numpy.ndarray): 2D features matrix \n labels: labels vector \n test_size: size of the split\n \n Returns:\n A 2D CSP features matrix \n \"\"\"\n Y = labels\n X_train, X_test, Y_train, Y_test = \\\n sklearn.model_selection.train_test_split(X, Y,\n test_size=test_size,\n random_state=0)\n return X_train, X_test, Y_train, Y_test\n\n\ndef time_series_split(features, labels, n_splits):\n \"\"\"Split a dataset into n splits.\n\n \"\"\"\n xx = sklearn.model_selection.TimeSeriesSplit(n_splits)\n for train_index, test_index in xx.split(features):\n X_train, X_test = features[train_index], features[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n\n return X_train, X_test, y_train, y_test\n\n\ndef stratified_KFold(features, labels, n_splits):\n\n \"\"\"Stratified K-Folds cross-validator\n Stratification is the process of rearranging the data as to ensure each fold is a good representative of the whole\n and by also keeping the balance of classes\n \"\"\"\n skf = StratifiedKFold(n_splits)\n skf.get_n_splits(features, labels)\n for train_index, test_index in skf.split(features, labels):\n X_train, X_test = features[train_index], features[test_index]\n Y_train, Y_test = labels[train_index], labels[test_index]\n return X_train, X_test, Y_train, Y_test\n\n#Stratified ShuffleSplit cross-validator\ndef stratified_shuffle_Split(features, labels, n_splits,test_size,random_state):\n\n \"\"\"Stratified ShuffleSplit cross-validator\n \"\"\"\n cv = StratifiedShuffleSplit(n_splits, test_size, random_state=random_state)\n for train_index, test_index in cv.split(features,labels):\n X_train = features[train_index]\n X_test = features[test_index]\n Y_train = labels[train_index]\n Y_test = labels[test_index]\n return X_train, X_test, Y_train, Y_test\n\n\n#Random permutation cross-validator\ndef shuffle_Split(features, labels, n_splits,test_size,random_state):\n\n \"\"\"ShuffleSplit: Random permutation cross-validator\n \"\"\"\n cv = ShuffleSplit(n_splits, test_size, random_state=random_state)\n for train_index, test_index in cv.split(features):\n X_train = features[train_index]\n X_test = features[test_index]\n Y_train = labels[train_index]\n Y_test = labels[test_index]\n return X_train, X_test, Y_train, Y_test\n" ]
[ [ "sklearn.model_selection.StratifiedShuffleSplit", "sklearn.model_selection.StratifiedKFold", "sklearn.model_selection.ShuffleSplit" ] ]
segasai/desispec
[ "4786347a8ad44effa4985671423f7ba0129ba6c3", "4786347a8ad44effa4985671423f7ba0129ba6c3" ]
[ "py/desispec/scripts/stdstars.py", "py/desispec/fluxcalibration.py" ]
[ "\n\n\"\"\"\nGet the normalized best template to do flux calibration.\n\"\"\"\n\n#- TODO: refactor algorithmic code into a separate module/function\n\nimport argparse\nimport sys\n\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy import units\nfrom astropy.table import Table\n\nfrom desispec import io\nfrom desispec.fluxcalibration import match_templates,normalize_templates,isStdStar\nfrom desispec.interpolation import resample_flux\nfrom desiutil.log import get_logger\nfrom desispec.parallel import default_nproc\nfrom desispec.io.filters import load_legacy_survey_filter\nfrom desiutil.dust import ext_odonnell,extinction_total_to_selective_ratio\nfrom desispec.fiberbitmasking import get_fiberbitmasked_frame\n\ndef parse(options=None):\n parser = argparse.ArgumentParser(description=\"Fit of standard star spectra in frames.\")\n parser.add_argument('--frames', type = str, default = None, required=True, nargs='*',\n help = 'list of path to DESI frame fits files (needs to be same exposure, spectro)')\n parser.add_argument('--skymodels', type = str, default = None, required=True, nargs='*',\n help = 'list of path to DESI sky model fits files (needs to be same exposure, spectro)')\n parser.add_argument('--fiberflats', type = str, default = None, required=True, nargs='*',\n help = 'list of path to DESI fiberflats fits files (needs to be same exposure, spectro)')\n parser.add_argument('--starmodels', type = str, help = 'path of spectro-photometric stellar spectra fits')\n parser.add_argument('-o','--outfile', type = str, help = 'output file for normalized stdstar model flux')\n parser.add_argument('--ncpu', type = int, default = default_nproc, required = False, help = 'use ncpu for multiprocessing')\n parser.add_argument('--delta-color', type = float, default = 0.2, required = False, help = 'max delta-color for the selection of standard stars (on top of meas. errors)')\n parser.add_argument('--color', type = str, default = \"G-R\", choices=['G-R', 'R-Z'], required = False, help = 'color for selection of standard stars')\n parser.add_argument('--z-max', type = float, default = 0.008, required = False, help = 'max peculiar velocity (blue/red)shift range')\n parser.add_argument('--z-res', type = float, default = 0.00002, required = False, help = 'dz grid resolution')\n parser.add_argument('--template-error', type = float, default = 0.1, required = False, help = 'fractional template error used in chi2 computation (about 0.1 for BOSS b1)')\n parser.add_argument('--maxstdstars', type=int, default=30, \\\n help='Maximum number of stdstars to include')\n\n log = get_logger()\n args = None\n if options is None:\n args = parser.parse_args()\n cmd = ' '.join(sys.argv)\n else:\n args = parser.parse_args(options)\n cmd = 'desi_fit_stdstars ' + ' '.join(options)\n\n log.info('RUNNING {}'.format(cmd))\n\n return args\n\ndef safe_read_key(header,key) :\n value = None\n try :\n value=header[key]\n except KeyError :\n value = None\n pass\n if value is None : # second try\n value=header[key.ljust(8).upper()]\n return value\n\ndef dust_transmission(wave,ebv) :\n Rv = 3.1\n extinction = ext_odonnell(wave,Rv=Rv)\n return 10**(-Rv*extinction*ebv/2.5)\n\ndef main(args) :\n \"\"\" finds the best models of all standard stars in the frame\n and normlize the model flux. 
Output is written to a file and will be called for calibration.\n \"\"\"\n\n log = get_logger()\n\n log.info(\"mag delta %s = %f (for the pre-selection of stellar models)\"%(args.color,args.delta_color))\n log.info('multiprocess parallelizing with {} processes'.format(args.ncpu))\n\n # READ DATA\n ############################################\n # First loop through and group by exposure and spectrograph\n frames_by_expid = {}\n for filename in args.frames :\n log.info(\"reading %s\"%filename)\n frame=io.read_frame(filename)\n expid = safe_read_key(frame.meta,\"EXPID\")\n camera = safe_read_key(frame.meta,\"CAMERA\").strip().lower()\n spec = camera[1]\n uniq_key = (expid,spec)\n if uniq_key in frames_by_expid.keys():\n frames_by_expid[uniq_key][camera] = frame\n else:\n frames_by_expid[uniq_key] = {camera: frame}\n\n frames={}\n flats={}\n skies={}\n\n spectrograph=None\n starfibers=None\n starindices=None\n fibermap=None\n\n # For each unique expid,spec pair, get the logical OR of the FIBERSTATUS for all\n # cameras and then proceed with extracting the frame information\n # once we modify the fibermap FIBERSTATUS\n for (expid,spec),camdict in frames_by_expid.items():\n\n fiberstatus = None\n for frame in camdict.values():\n if fiberstatus is None:\n fiberstatus = frame.fibermap['FIBERSTATUS'].data.copy()\n else:\n fiberstatus |= frame.fibermap['FIBERSTATUS']\n\n for camera,frame in camdict.items():\n frame.fibermap['FIBERSTATUS'] |= fiberstatus\n # Set fibermask flagged spectra to have 0 flux and variance\n frame = get_fiberbitmasked_frame(frame,bitmask='stdstars',ivar_framemask=True)\n frame_fibermap = frame.fibermap\n frame_starindices = np.where(isStdStar(frame_fibermap))[0]\n\n #- Confirm that all fluxes have entries but trust targeting bits\n #- to get basic magnitude range correct\n keep = np.ones(len(frame_starindices), dtype=bool)\n\n for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2?\n keep &= frame_fibermap[colname][frame_starindices] > 10**((22.5-30)/2.5)\n keep &= frame_fibermap[colname][frame_starindices] < 10**((22.5-0)/2.5)\n\n frame_starindices = frame_starindices[keep]\n\n if spectrograph is None :\n spectrograph = frame.spectrograph\n fibermap = frame_fibermap\n starindices=frame_starindices\n starfibers=fibermap[\"FIBER\"][starindices]\n\n elif spectrograph != frame.spectrograph :\n log.error(\"incompatible spectrographs %d != %d\"%(spectrograph,frame.spectrograph))\n raise ValueError(\"incompatible spectrographs %d != %d\"%(spectrograph,frame.spectrograph))\n elif starindices.size != frame_starindices.size or np.sum(starindices!=frame_starindices)>0 :\n log.error(\"incompatible fibermap\")\n raise ValueError(\"incompatible fibermap\")\n\n if not camera in frames :\n frames[camera]=[]\n\n frames[camera].append(frame)\n\n # possibly cleanup memory\n del frames_by_expid\n\n for filename in args.skymodels :\n log.info(\"reading %s\"%filename)\n sky=io.read_sky(filename)\n camera=safe_read_key(sky.header,\"CAMERA\").strip().lower()\n if not camera in skies :\n skies[camera]=[]\n skies[camera].append(sky)\n\n for filename in args.fiberflats :\n log.info(\"reading %s\"%filename)\n flat=io.read_fiberflat(filename)\n camera=safe_read_key(flat.header,\"CAMERA\").strip().lower()\n\n # NEED TO ADD MORE CHECKS\n if camera in flats:\n log.warning(\"cannot handle several flats of same camera (%s), will use only the first one\"%camera)\n #raise ValueError(\"cannot handle several flats of same camera (%s)\"%camera)\n else :\n flats[camera]=flat\n\n\n if starindices.size == 
0 :\n log.error(\"no STD star found in fibermap\")\n raise ValueError(\"no STD star found in fibermap\")\n\n log.info(\"found %d STD stars\"%starindices.size)\n\n # log.warning(\"Not using flux errors for Standard Star fits!\")\n\n # DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA\n ############################################\n # since poping dict, we need to copy keys to iterate over to avoid\n # RuntimeError due to changing dict\n frame_cams = list(frames.keys())\n for cam in frame_cams:\n\n if not cam in skies:\n log.warning(\"Missing sky for %s\"%cam)\n frames.pop(cam)\n continue\n if not cam in flats:\n log.warning(\"Missing flat for %s\"%cam)\n frames.pop(cam)\n continue\n\n flat=flats[cam]\n for frame,sky in zip(frames[cam],skies[cam]) :\n frame.flux = frame.flux[starindices]\n frame.ivar = frame.ivar[starindices]\n frame.ivar *= (frame.mask[starindices] == 0)\n frame.ivar *= (sky.ivar[starindices] != 0)\n frame.ivar *= (sky.mask[starindices] == 0)\n frame.ivar *= (flat.ivar[starindices] != 0)\n frame.ivar *= (flat.mask[starindices] == 0)\n frame.flux *= ( frame.ivar > 0) # just for clean plots\n for star in range(frame.flux.shape[0]) :\n ok=np.where((frame.ivar[star]>0)&(flat.fiberflat[star]!=0))[0]\n if ok.size > 0 :\n frame.flux[star] = frame.flux[star]/flat.fiberflat[star] - sky.flux[star]\n frame.resolution_data = frame.resolution_data[starindices]\n\n nframes=len(frames[cam])\n if nframes>1 :\n # optimal weights for the coaddition = ivar*throughput, not directly ivar,\n # we estimate the relative throughput with median fluxes at this stage\n medflux=np.zeros(nframes)\n for i,frame in enumerate(frames[cam]) :\n if np.sum(frame.ivar>0) == 0 :\n log.error(\"ivar=0 for all std star spectra in frame {}-{:08d}\".format(cam,frame.meta[\"EXPID\"]))\n else :\n medflux[i] = np.median(frame.flux[frame.ivar>0])\n log.debug(\"medflux = {}\".format(medflux))\n medflux *= (medflux>0)\n if np.sum(medflux>0)==0 :\n log.error(\"mean median flux = 0, for all stars in fibers {}\".format(list(frames[cam][0].fibermap[\"FIBER\"][starindices])))\n sys.exit(12)\n mmedflux = np.mean(medflux[medflux>0])\n weights=medflux/mmedflux\n log.info(\"coadding {} exposures in cam {}, w={}\".format(nframes,cam,weights))\n\n sw=np.zeros(frames[cam][0].flux.shape)\n swf=np.zeros(frames[cam][0].flux.shape)\n swr=np.zeros(frames[cam][0].resolution_data.shape)\n\n for i,frame in enumerate(frames[cam]) :\n sw += weights[i]*frame.ivar\n swf += weights[i]*frame.ivar*frame.flux\n swr += weights[i]*frame.ivar[:,None,:]*frame.resolution_data\n coadded_frame = frames[cam][0]\n coadded_frame.ivar = sw\n coadded_frame.flux = swf/(sw+(sw==0))\n coadded_frame.resolution_data = swr/((sw+(sw==0))[:,None,:])\n frames[cam] = [ coadded_frame ]\n\n\n # CHECK S/N\n ############################################\n # for each band in 'brz', record quadratic sum of median S/N across wavelength\n snr=dict()\n for band in ['b','r','z'] :\n snr[band]=np.zeros(starindices.size)\n for cam in frames :\n band=cam[0].lower()\n for frame in frames[cam] :\n msnr = np.median( frame.flux * np.sqrt( frame.ivar ) / np.sqrt(np.gradient(frame.wave)) , axis=1 ) # median SNR per sqrt(A.)\n msnr *= (msnr>0)\n snr[band] = np.sqrt( snr[band]**2 + msnr**2 )\n log.info(\"SNR(B) = {}\".format(snr['b']))\n\n ###############################\n max_number_of_stars = 50\n min_blue_snr = 4.\n ###############################\n indices=np.argsort(snr['b'])[::-1][:max_number_of_stars]\n\n validstars = np.where(snr['b'][indices]>min_blue_snr)[0]\n\n #- TODO: later we filter on 
models based upon color, thus throwing\n #- away very blue stars for which we don't have good models.\n\n log.info(\"Number of stars with median stacked blue S/N > {} /sqrt(A) = {}\".format(min_blue_snr,validstars.size))\n if validstars.size == 0 :\n log.error(\"No valid star\")\n sys.exit(12)\n\n validstars = indices[validstars]\n\n for band in ['b','r','z'] :\n snr[band]=snr[band][validstars]\n\n log.info(\"BLUE SNR of selected stars={}\".format(snr['b']))\n\n for cam in frames :\n for frame in frames[cam] :\n frame.flux = frame.flux[validstars]\n frame.ivar = frame.ivar[validstars]\n frame.resolution_data = frame.resolution_data[validstars]\n starindices = starindices[validstars]\n starfibers = starfibers[validstars]\n nstars = starindices.size\n fibermap = Table(fibermap[starindices])\n\n # MASK OUT THROUGHPUT DIP REGION\n ############################################\n mask_throughput_dip_region = True\n if mask_throughput_dip_region :\n wmin=4300.\n wmax=4500.\n log.warning(\"Masking out the wavelength region [{},{}]A in the standard star fit\".format(wmin,wmax))\n for cam in frames :\n for frame in frames[cam] :\n ii=np.where( (frame.wave>=wmin)&(frame.wave<=wmax) )[0]\n if ii.size>0 :\n frame.ivar[:,ii] = 0\n\n # READ MODELS\n ############################################\n log.info(\"reading star models in %s\"%args.starmodels)\n stdwave,stdflux,templateid,teff,logg,feh=io.read_stdstar_templates(args.starmodels)\n\n # COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG\n ############################################\n\n #- Support older fibermaps\n if 'PHOTSYS' not in fibermap.colnames:\n log.warning('Old fibermap format; using defaults for missing columns')\n log.warning(\" PHOTSYS = 'S'\")\n log.warning(\" EBV = 0.0\")\n fibermap['PHOTSYS'] = 'S'\n fibermap['EBV'] = 0.0\n\n model_filters = dict()\n for band in [\"G\",\"R\",\"Z\"] :\n for photsys in np.unique(fibermap['PHOTSYS']) :\n model_filters[band+photsys] = load_legacy_survey_filter(band=band,photsys=photsys)\n\n log.info(\"computing model mags for %s\"%sorted(model_filters.keys()))\n model_mags = dict()\n fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom\n for filter_name, filter_response in model_filters.items():\n model_mags[filter_name] = filter_response.get_ab_magnitude(stdflux*fluxunits,stdwave)\n log.info(\"done computing model mags\")\n\n # LOOP ON STARS TO FIND BEST MODEL\n ############################################\n linear_coefficients=np.zeros((nstars,stdflux.shape[0]))\n chi2dof=np.zeros((nstars))\n redshift=np.zeros((nstars))\n normflux=[]\n\n star_mags = dict()\n star_unextincted_mags = dict()\n\n photometric_systems = np.unique(fibermap['PHOTSYS'])\n for band in ['G', 'R', 'Z']:\n star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band])\n star_unextincted_mags[band] = np.zeros(star_mags[band].shape)\n for photsys in photometric_systems :\n r_band = extinction_total_to_selective_ratio(band , photsys) # dimensionless\n # r_band = a_band / E(B-V)\n # E(B-V) is a difference of magnitudes (dimensionless)\n # a_band = -2.5*log10(effective dust transmission) , dimensionless\n # effective dust transmission =\n # integral( SED(lambda) * filter_transmission(lambda,band) * milkyway_dust_transmission(lambda,E(B-V)) dlamdba)\n # / integral( SED(lambda) * filter_transmission(lambda,band) dlamdba)\n selection = (fibermap['PHOTSYS'] == photsys)\n a_band = r_band * fibermap['EBV'][selection] # dimensionless\n star_unextincted_mags[band][selection] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band][selection]) 
- a_band\n\n star_colors = dict()\n star_colors['G-R'] = star_mags['G'] - star_mags['R']\n star_colors['R-Z'] = star_mags['R'] - star_mags['Z']\n\n star_unextincted_colors = dict()\n star_unextincted_colors['G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R']\n star_unextincted_colors['R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z']\n\n fitted_model_colors = np.zeros(nstars)\n\n for star in range(nstars) :\n\n log.info(\"finding best model for observed star #%d\"%star)\n\n # np.array of wave,flux,ivar,resol\n wave = {}\n flux = {}\n ivar = {}\n resolution_data = {}\n for camera in frames :\n for i,frame in enumerate(frames[camera]) :\n identifier=\"%s-%d\"%(camera,i)\n wave[identifier]=frame.wave\n flux[identifier]=frame.flux[star]\n ivar[identifier]=frame.ivar[star]\n resolution_data[identifier]=frame.resolution_data[star]\n\n # preselect models based on magnitudes\n photsys=fibermap['PHOTSYS'][star]\n if not args.color in ['G-R','R-Z'] :\n raise ValueError('Unknown color {}'.format(args.color))\n bands=args.color.split(\"-\")\n model_colors = model_mags[bands[0]+photsys] - model_mags[bands[1]+photsys]\n\n color_diff = model_colors - star_unextincted_colors[args.color][star]\n selection = np.abs(color_diff) < args.delta_color\n if np.sum(selection) == 0 :\n log.warning(\"no model in the selected color range for this star\")\n continue\n\n\n # smallest cube in parameter space including this selection (needed for interpolation)\n new_selection = (teff>=np.min(teff[selection]))&(teff<=np.max(teff[selection]))\n new_selection &= (logg>=np.min(logg[selection]))&(logg<=np.max(logg[selection]))\n new_selection &= (feh>=np.min(feh[selection]))&(feh<=np.max(feh[selection]))\n selection = np.where(new_selection)[0]\n\n log.info(\"star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d\"%(\n star, starfibers[star], args.color, star_unextincted_colors[args.color][star],\n selection.size, stdflux.shape[0]))\n\n # Match unextincted standard stars to data\n coefficients, redshift[star], chi2dof[star] = match_templates(\n wave, flux, ivar, resolution_data,\n stdwave, stdflux[selection],\n teff[selection], logg[selection], feh[selection],\n ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res,\n template_error=args.template_error\n )\n\n linear_coefficients[star,selection] = coefficients\n\n log.info('Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}'.format(\n starfibers[star],\n np.inner(teff,linear_coefficients[star]),\n np.inner(logg,linear_coefficients[star]),\n np.inner(feh,linear_coefficients[star]),\n redshift[star],\n chi2dof[star])\n )\n\n # Apply redshift to original spectrum at full resolution\n model=np.zeros(stdwave.size)\n redshifted_stdwave = stdwave*(1+redshift[star])\n for i,c in enumerate(linear_coefficients[star]) :\n if c != 0 :\n model += c*np.interp(stdwave,redshifted_stdwave,stdflux[i])\n\n # Apply dust extinction to the model\n log.info(\"Applying MW dust extinction to star {} with EBV = {}\".format(star,fibermap['EBV'][star]))\n model *= dust_transmission(stdwave, fibermap['EBV'][star])\n\n # Compute final color of dust-extincted model\n photsys=fibermap['PHOTSYS'][star]\n if not args.color in ['G-R','R-Z'] :\n raise ValueError('Unknown color {}'.format(args.color))\n bands=args.color.split(\"-\")\n model_mag1 = model_filters[bands[0]+photsys].get_ab_magnitude(model*fluxunits, stdwave)\n model_mag2 = model_filters[bands[1]+photsys].get_ab_magnitude(model*fluxunits, stdwave)\n fitted_model_colors[star] = 
model_mag1 - model_mag2\n if bands[0]==\"R\" :\n model_magr = model_mag1\n elif bands[1]==\"R\" :\n model_magr = model_mag2\n\n #- TODO: move this back into normalize_templates, at the cost of\n #- recalculating a model magnitude?\n\n # Normalize the best model using reported magnitude\n scalefac=10**((model_magr - star_mags['R'][star])/2.5)\n\n log.info('scaling R mag {:.3f} to {:.3f} using scale {}'.format(model_magr, star_mags['R'][star], scalefac))\n normflux.append(model*scalefac)\n\n # Now write the normalized flux for all best models to a file\n normflux=np.array(normflux)\n\n fitted_stars = np.where(chi2dof != 0)[0]\n if fitted_stars.size == 0 :\n log.error(\"No star has been fit.\")\n sys.exit(12)\n\n data={}\n data['LOGG']=linear_coefficients[fitted_stars,:].dot(logg)\n data['TEFF']= linear_coefficients[fitted_stars,:].dot(teff)\n data['FEH']= linear_coefficients[fitted_stars,:].dot(feh)\n data['CHI2DOF']=chi2dof[fitted_stars]\n data['REDSHIFT']=redshift[fitted_stars]\n data['COEFF']=linear_coefficients[fitted_stars,:]\n data['DATA_%s'%args.color]=star_colors[args.color][fitted_stars]\n data['MODEL_%s'%args.color]=fitted_model_colors[fitted_stars]\n data['BLUE_SNR'] = snr['b'][fitted_stars]\n data['RED_SNR'] = snr['r'][fitted_stars]\n data['NIR_SNR'] = snr['z'][fitted_stars]\n io.write_stdstar_models(args.outfile,normflux,stdwave,starfibers[fitted_stars],data)\n", "\"\"\"\ndesispec.fluxcalibration\n========================\n\nFlux calibration routines.\n\"\"\"\nfrom __future__ import absolute_import\nimport numpy as np\nfrom .resolution import Resolution\nfrom .linalg import cholesky_solve, cholesky_solve_and_invert, spline_fit\nfrom .interpolation import resample_flux\nfrom desiutil.log import get_logger\nfrom .io.filters import load_legacy_survey_filter\nfrom desispec import util\nfrom desispec.frame import Frame\nfrom desitarget.targets import main_cmx_or_sv\nimport scipy, scipy.sparse, scipy.ndimage\nimport sys\nimport time\nfrom astropy import units\nimport multiprocessing\nfrom pkg_resources import resource_exists, resource_filename\nimport numpy.linalg\nimport copy\n\ntry:\n from scipy import constants\n C_LIGHT = constants.c/1000.0\nexcept TypeError: # This can happen during documentation builds.\n C_LIGHT = 299792458.0/1000.0\n\ndef isStdStar(fibermap, bright=None):\n \"\"\"\n Determines if target(s) are standard stars\n\n Args:\n fibermap: table including DESI_TARGET or SV1_DESI_TARGET bit mask(s)\n\n Optional:\n bright: if True, only bright time standards; if False, only darktime, otherwise both\n\n Returns bool or array of bool\n \"\"\"\n log = get_logger()\n target_colnames, target_masks, survey = main_cmx_or_sv(fibermap)\n desi_target = fibermap[target_colnames[0]] # (SV1_)DESI_TARGET\n mws_target = fibermap[target_colnames[2]] # (SV1_)MWS_TARGET\n desi_mask = target_masks[0] # (sv1_)desi_mask\n mws_mask = target_masks[2] # (sv1_)mws_mask\n\n # mapping of which stdstar bits to use depending upon `bright` input\n # NOTE: STD_WD and GAIA_STD_WD not yet included in stdstar fitting\n desiDict ={\n None:['STD_FAINT','STD_BRIGHT', 'SV0_STD_FAINT', 'SV0_STD_BRIGHT'],\n True: ['STD_BRIGHT', 'SV0_STD_BRIGHT'],\n False: ['STD_FAINT', 'SV0_STD_FAINT']\n }\n mwsDict ={\n None:['GAIA_STD_FAINT','GAIA_STD_BRIGHT'],\n True:['GAIA_STD_BRIGHT'],\n False:['GAIA_STD_FAINT'],\n }\n\n yes = np.zeros_like(desi_target, dtype=bool)\n for k in desiDict[bright]:\n if k in desi_mask.names():\n yes = yes | ((desi_target & desi_mask[k])!=0)\n yes_mws = np.zeros_like(desi_target, dtype=bool)\n 
for k in mwsDict[bright]:\n if k in mws_mask.names():\n yes_mws |= ((mws_target & mws_mask[k])!=0)\n yes = yes | yes_mws\n\n #- Hack for data on 20201214 where some fiberassign files had targeting\n #- bits set to 0, but column FA_TYPE was still ok\n #- Hardcode mask to avoid fiberassign dependency loop\n FA_STDSTAR_MASK = 2 # fiberassing.targets.TARGET_TYPE_STANDARD\n if np.count_nonzero(yes) == 0:\n log.error(f'No standard stars found in {target_colnames[0]} or {target_colnames[2]}')\n if 'FA_TYPE' in fibermap.dtype.names and \\\n np.sum((fibermap['FA_TYPE'] & FA_STDSTAR_MASK) != 0) > 0:\n log.warning('Using FA_TYPE to find standard stars instead')\n yes = (fibermap['FA_TYPE'] & FA_STDSTAR_MASK) != 0\n\n return yes\n\ndef applySmoothingFilter(flux,width=200) :\n \"\"\" Return a smoothed version of the input flux array using a median filter\n\n Args:\n flux : 1D array of flux\n width : size of the median filter box\n\n Returns:\n smooth_flux : median filtered flux of same size as input\n \"\"\"\n\n # it was checked that the width of the median_filter has little impact on best fit stars\n # smoothing the ouput (with a spline for instance) does not improve the fit\n return scipy.ndimage.filters.median_filter(flux,width,mode='constant')\n#\n# Import some global constants.\n#\n# Why not use astropy constants?\n#\n# This is VERY inconvenient when trying to build documentation!\n# The documentation may be build in an environment that does not have\n# scipy installed. There is no obvious reason why this has to be a module-level\n# calculation.\n#\nimport scipy.constants as const\nh=const.h\npi=const.pi\ne=const.e\nc=const.c\nerg=const.erg\ntry:\n hc = const.h/const.erg*const.c*1.e10 # (in units of ergsA)\nexcept TypeError:\n hc = 1.9864458241717586e-08\n\ndef resample_template(data_wave_per_camera,resolution_data_per_camera,template_wave,template_flux,template_id) :\n \"\"\"Resample a spectral template on the data wavelength grid. Then convolve the spectra by the resolution\n for each camera. Also returns the result of applySmoothingFilter. 
This routine is used internally in\n a call to multiprocessing.Pool.\n\n Args:\n data_wave_per_camera : A dictionary of 1D array of vacuum wavelengths [Angstroms], one entry per camera and exposure.\n resolution_data_per_camera : A dictionary of resolution corresponding for the fiber, one entry per camera and exposure.\n template_wave : 1D array, input spectral template wavelength [Angstroms] (arbitrary spacing).\n template_flux : 1D array, input spectral template flux density.\n template_id : int, template identification index, used to ensure matching of input/output after a multiprocessing run.\n\n Returns:\n template_id : int, template identification index, same as input.\n output_wave : A dictionary of 1D array of vacuum wavelengths\n output_flux : A dictionary of 1D array of output template flux\n output_norm : A dictionary of 1D array of output template smoothed flux\n \"\"\"\n output_wave=np.array([])\n output_flux=np.array([])\n output_norm=np.array([])\n sorted_keys = list(data_wave_per_camera.keys())\n sorted_keys.sort() # force sorting the keys to agree with data (found unpredictable ordering in tests)\n for cam in sorted_keys :\n flux1=resample_flux(data_wave_per_camera[cam],template_wave,template_flux) # this is slow\n flux2=Resolution(resolution_data_per_camera[cam]).dot(flux1) # this is slow\n norme=applySmoothingFilter(flux2) # this is fast\n flux3=flux2/(norme+(norme==0))\n output_flux = np.append(output_flux,flux3)\n output_norm = np.append(output_norm,norme)\n output_wave = np.append(output_wave,data_wave_per_camera[cam]) # need to add wave to avoid wave/flux matching errors\n return template_id,output_wave,output_flux,output_norm\n\n\ndef _func(arg) :\n \"\"\" Used for multiprocessing.Pool \"\"\"\n return resample_template(**arg)\n\ndef _smooth_template(template_id,camera_index,template_flux) :\n \"\"\" Used for multiprocessing.Pool \"\"\"\n norme = applySmoothingFilter(template_flux)\n return template_id,camera_index,norme\n\ndef _func2(arg) :\n \"\"\" Used for multiprocessing.Pool \"\"\"\n return _smooth_template(**arg)\n\ndef redshift_fit(wave, flux, ivar, resolution_data, stdwave, stdflux, z_max=0.005, z_res=0.00005, template_error=0.):\n \"\"\" Redshift fit of a single template\n\n Args:\n wave : A dictionary of 1D array of vacuum wavelengths [Angstroms]. Example below.\n flux : A dictionary of 1D observed flux for the star\n ivar : A dictionary 1D inverse variance of flux\n resolution_data: resolution corresponding to the star's fiber\n stdwave : 1D standard star template wavelengths [Angstroms]\n stdflux : 1D[nwave] template flux\n z_max : float, maximum blueshift and redshift in scan, has to be positive\n z_res : float, step of of redshift scan between [-z_max,+z_max]\n template_error : float, assumed template flux relative error\n\n Returns:\n redshift : redshift of standard star\n\n\n Notes:\n - wave and stdwave can be on different grids that don't\n necessarily overlap\n - wave does not have to be uniform or monotonic. 
Multiple cameras\n can be supported by concatenating their wave and flux arrays\n \"\"\"\n cameras = list(flux.keys())\n log = get_logger()\n log.debug(time.asctime())\n\n # resampling on a log wavelength grid\n #####################################\n # need to go fast so we resample both data and model on a log grid\n\n # define grid\n minwave = 100000.\n maxwave = 0.\n for cam in cameras :\n minwave=min(minwave,np.min(wave[cam]))\n maxwave=max(maxwave,np.max(wave[cam]))\n # ala boss\n lstep=np.log10(1+z_res)\n margin=int(np.log10(1+z_max)/lstep)+1\n minlwave=np.log10(minwave)\n maxlwave=np.log10(maxwave) # desired, but readjusted\n nstep=(maxlwave-minlwave)/lstep\n\n resampled_lwave=minlwave+lstep*np.arange(nstep)\n resampled_wave=10**resampled_lwave\n\n # map data on grid\n resampled_data={}\n resampled_ivar={}\n resampled_model={}\n for cam in cameras :\n tmp_flux,tmp_ivar=resample_flux(resampled_wave,wave[cam],flux[cam],ivar[cam])\n resampled_data[cam]=tmp_flux\n resampled_ivar[cam]=tmp_ivar\n\n # we need to have the model on a larger grid than the data wave for redshifting\n dwave=wave[cam][-1]-wave[cam][-2]\n npix=int((wave[cam][-1]*z_max)/dwave+2)\n extended_cam_wave=np.append( wave[cam][0]+dwave*np.arange(-npix,0) , wave[cam])\n extended_cam_wave=np.append( extended_cam_wave, wave[cam][-1]+dwave*np.arange(1,npix+1))\n # ok now we also need to increase the resolution\n tmp_res=np.zeros((resolution_data[cam].shape[0],resolution_data[cam].shape[1]+2*npix))\n tmp_res[:,:npix] = np.tile(resolution_data[cam][:,0],(npix,1)).T\n tmp_res[:,npix:-npix] = resolution_data[cam]\n tmp_res[:,-npix:] = np.tile(resolution_data[cam][:,-1],(npix,1)).T\n # resampled model at camera resolution, with margin\n tmp=resample_flux(extended_cam_wave,stdwave,stdflux)\n tmp=Resolution(tmp_res).dot(tmp)\n # map on log lam grid\n resampled_model[cam]=resample_flux(resampled_wave,extended_cam_wave,tmp)\n\n # we now normalize both model and data\n tmp=applySmoothingFilter(resampled_data[cam])\n resampled_data[cam]/=(tmp+(tmp==0))\n resampled_ivar[cam]*=tmp**2\n\n if template_error>0 :\n ok=np.where(resampled_ivar[cam]>0)[0]\n if ok.size > 0 :\n resampled_ivar[cam][ok] = 1./ ( 1/resampled_ivar[cam][ok] + template_error**2 )\n\n tmp=applySmoothingFilter(resampled_model[cam])\n resampled_model[cam]/=(tmp+(tmp==0))\n resampled_ivar[cam]*=(tmp!=0)\n\n # fit the best redshift\n chi2=np.zeros((2*margin+1))\n ndata=np.zeros((2*margin+1))\n for i in range(-margin,margin+1) :\n for cam in cameras :\n ndata[i+margin] += np.sum(resampled_ivar[cam][margin:-margin]>0)\n if i<margin :\n chi2[i+margin] += np.sum(resampled_ivar[cam][margin:-margin]*(resampled_data[cam][margin:-margin]-resampled_model[cam][margin+i:-margin+i])**2)\n else :\n chi2[i+margin] += np.sum(resampled_ivar[cam][margin:-margin]*(resampled_data[cam][margin:-margin]-resampled_model[cam][margin+i:])**2)\n\n i=np.argmin(chi2)-margin\n z=10**(-i*lstep)-1\n log.debug(\"Best z=%f\"%z)\n '''\n log.debug(\"i=%d\"%i)\n log.debug(\"lstep=%f\"%lstep)\n log.debug(\"margin=%d\"%margin)\n plt.figure()\n #plt.plot(chi2)\n for cam in cameras :\n ok=np.where(resampled_ivar[cam]>0)[0]\n #plt.plot(resampled_wave[ok],resampled_data[cam][ok],\"o\",c=\"gray\")\n plt.errorbar(resampled_wave[ok],resampled_data[cam][ok],1./np.sqrt(resampled_ivar[cam][ok]),fmt=\"o\",color=\"gray\")\n plt.plot(resampled_wave[margin:-margin],resampled_model[cam][margin+i:-margin+i],\"-\",c=\"r\")\n plt.show()\n '''\n return z\n\n\ndef _compute_coef(coord,node_coords) :\n \"\"\" Function used by 
interpolate_on_parameter_grid2\n\n Args:\n coord : 1D array of coordinates of size n_axis\n node_coords : 2D array of coordinates of nodes, shape = (n_nodes,n_axis)\n\n Returns:\n coef : 1D array of linear coefficients for each node, size = n_nodes\n \"\"\"\n\n n_nodes=node_coords.shape[0]\n npar=node_coords.shape[1]\n coef=np.ones(n_nodes)\n for s in range(n_nodes) :\n coef[s]=1.\n for a in range(npar) :\n dist=np.abs(node_coords[s,a]-coord[a]) # distance between model point and node along axis a\n\n # piece-wise linear version\n if dist>1 :\n coef[s]=0.\n break\n coef[s] *= (1.-dist)\n\n # we could alternatively have used b-spline of higher order\n\n norme=np.sum(coef)\n if norme<=0 : # we are outside of valid grid\n return np.zeros(coef.shape) # will be detected in fitter\n coef /= norme\n return coef\n\n\ndef interpolate_on_parameter_grid(data_wave, data_flux, data_ivar, template_flux, teff, logg, feh, template_chi2) :\n \"\"\" 3D Interpolation routine among templates based on a grid of parameters teff, logg, feh.\n The tricky part is to define a cube on the parameter grid populated with templates, and it is not always possible.\n The routine never extrapolates, so that we stay in the range of input parameters.\n\n Args:\n data_wave : 1D[nwave] array of wavelength (concatenated list of input wavelength of different cameras and exposures)\n data_flux : 1D[nwave] array of normalized flux = (input flux)/median_filter(input flux) (concatenated list)\n data_ivar : 1D[nwave] array of inverse variance of normalized flux\n template_flux : 2D[ntemplates,nwave] array of normalized flux of templates (after resample, convolution and division by median_filter)\n teff : 1D[ntemplates]\n logg : 1D[ntemplates]\n feh : 1D[ntemplates]\n template_chi2 : 1D[ntemplatess] array of precomputed chi2 = sum(data_ivar*(data_flux-template_flux)**2)\n\n Returns:\n coefficients : best fit coefficient of linear combination of templates\n chi2 : chi2 of the linear combination\n \"\"\"\n\n log = get_logger()\n log.debug(\"starting interpolation on grid\")\n\n best_model_id = np.argmin(template_chi2)\n ndata=np.sum(data_ivar>0)\n\n log.debug(\"best model id=%d chi2/ndata=%f teff=%d logg=%2.1f feh=%2.1f\"%(best_model_id,template_chi2[best_model_id]/ndata,teff[best_model_id],logg[best_model_id],feh[best_model_id]))\n\n ntemplates=template_flux.shape[0]\n\n log_linear = False # if True , model = exp( sum_i a_i * log(template_flux_i) ), else model = sum_i a_i * template_flux_i\n\n # physical parameters define axes\n npar=3\n param=np.zeros((npar,ntemplates))\n param[0]=teff\n param[1]=logg\n param[2]=feh\n\n # grid nodes coordinates (unique values of the parameters)\n uparam=[]\n for a in range(npar) :\n uparam.append(np.unique(param[a]))\n #for a in range(npar) :\n # log.debug(\"param %d : %s\"%(a,str(uparam[a])))\n\n\n node_grid_coords=np.zeros((npar,3)).astype(int)\n for a in range(npar) : # a is an axis\n # this is the coordinate on axis 'a' of the best node\n i=np.where(uparam[a]==param[a,best_model_id])[0][0]\n node_grid_coords[a]=np.array([i-1,i,i+1])\n log.debug(\"node_grid_coords[%d]=%s\"%(a,node_grid_coords[a]))\n\n # we don't always have a template on all nodes\n node_template_ids=[]\n node_cube_coords=[]\n for i0,j0 in zip(node_grid_coords[0],[-1,0,1]) :\n for i1,j1 in zip(node_grid_coords[1],[-1,0,1]) :\n for i2,j2 in zip(node_grid_coords[2],[-1,0,1]) :\n\n # check whether coord is in grid\n in_grid = (i0>=0)&(i0<uparam[0].size)&(i1>=0)&(i1<uparam[1].size)&(i2>=0)&(i2<uparam[2].size)\n if not in_grid :\n 
continue\n # check whether there is a template on this node\n selection=np.where((param[0]==uparam[0][i0])&(param[1]==uparam[1][i1])&(param[2]==uparam[2][i2]))[0]\n if selection.size == 0 : # no template on node\n log.debug(\"not template for params = %f,%f,%f\"%(uparam[0][i0],uparam[1][i1],uparam[2][i2]))\n continue\n # we have one\n node_cube_coords.append([j0,j1,j2])\n node_template_ids.append(selection[0])\n node_template_ids=np.array(node_template_ids).astype(int)\n node_cube_coords=np.array(node_cube_coords).astype(int)\n\n # the parameters of the fit are npar coordinates in the range [-1,1] centered on best fit node\n coord=np.zeros(npar)\n\n n_templates = node_template_ids.size\n\n # we are done with the indexing and choice of template nodes\n node_template_flux = template_flux[node_template_ids]\n\n # compute all weighted scalar products among templates (only works if linear combination, not the log version)\n HB=np.zeros(n_templates)\n HA=np.zeros((n_templates,n_templates))\n for t in range(n_templates) :\n HB[t] = np.sum(data_ivar*data_flux*node_template_flux[t])\n for t2 in range(n_templates) :\n if HA[t2,t] != 0 :\n HA[t,t2] = HA[t2,t]\n else :\n HA[t,t2] = np.sum(data_ivar*node_template_flux[t]*node_template_flux[t2])\n\n chi2_0 = np.sum(data_ivar*data_flux**2)\n\n # chi2 = np.sum(data_ivar*(data_flux-model)**2)\n # = chi2_0 - 2*np.sum(data_ivar*data_flux*model) + np.sum(data_ivar*model**2)\n # model = sum_i coef_i model_i\n # chi2 = chi2_0 - 2* sum_i coef_i * HB[i] + sum_ij coef_i * coef_j * HA[i,j]\n # chi2 = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n\n\n # initial state\n coef = _compute_coef(coord,node_cube_coords)\n chi2 = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n log.debug(\"init coord=%s chi2/ndata=%f\"%(coord,chi2/ndata))\n\n # now we have to do the fit\n # fitting one axis at a time (simultaneous fit of 3 axes was tested and found inefficient : rapidly stuck on edges)\n # it has to be iterative because the model is a non-linear combination of parameters w, ex: w[0]*(1-w[1])*(1-w[2])\n for loop in range(50) :\n\n previous_chi2=chi2.copy()\n previous_coord=coord.copy()\n\n for a in range(npar) :\n previous_chi2_a=chi2.copy()\n\n # it's a linear combination of templates, but the model is non-linear function of coordinates\n # so there is no gain in trying to fit robustly with Gauss-Newton, we simply do a scan\n # it is converging rapidely (need however to iterate on axes)\n xcoord=coord.copy()\n xx=np.linspace(-1,1,41) # keep points on nodes , 41 is the resolution, 0.05 of node inter-distance\n chi2=np.zeros(xx.shape)\n for i,x in enumerate(xx) :\n xcoord[a]=x\n coef = _compute_coef(xcoord,node_cube_coords)\n if np.sum(coef)==0 : # outside valid range\n chi2[i]=1e20\n else :\n chi2[i] = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n\n ibest=np.argmin(chi2)\n chi2=chi2[ibest]\n coord[a]=xx[ibest]\n\n log.debug(\"loop #%d coord=%s chi2/ndata=%f (-dchi2_loop=%f -dchi2_tot=%f)\"%(loop,coord,chi2/ndata,previous_chi2-chi2,template_chi2[best_model_id]-chi2))\n diff=np.max(np.abs(coord-previous_coord))\n if diff < 0.001 :\n break\n\n # finally perform an exact best fit per axis\n for loop in range(50) :\n previous_chi2=chi2.copy()\n previous_coord=coord.copy()\n for a in range(npar) :\n if coord[a]==-1 or coord[a]==1 :\n continue # we are on edge, no gain in refitting\n xcoord=coord.copy()\n coef_minus = _compute_coef(xcoord,node_cube_coords)\n eps=0.001\n xcoord[a] += eps\n coef_plus = _compute_coef(xcoord,node_cube_coords)\n 
dcoef_dcoord = (coef_plus-coef_minus)/eps # do a numeric derivative\n #log.debug(\"dcoef_dcoord=%s\"%dcoef_dcoord)\n B = np.inner(dcoef_dcoord,HB) - np.inner(dcoef_dcoord,HA.dot(coef_minus))\n A = np.inner(dcoef_dcoord,HA.dot(dcoef_dcoord))\n if A>0 :\n dcoord=B/A\n #log.debug(\"dcoord=%f\"%dcoord)\n tmp_coord=coord.copy()\n tmp_coord[a] += dcoord\n if tmp_coord[a]<-1 or tmp_coord[a]>1 :\n #log.debug(\"do not allow extrapolations\")\n continue\n coef = _compute_coef(tmp_coord,node_cube_coords)\n tmp_chi2 = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n if tmp_chi2 < chi2 :\n log.debug(\"Improved chi2 by %f with a shift along %d of %f\"%(chi2-tmp_chi2,a,dcoord))\n coord=tmp_coord\n chi2 = tmp_chi2\n diff=np.max(np.abs(coord-previous_coord))\n if diff < 0.001 :\n break\n\n coef = _compute_coef(coord,node_cube_coords)\n chi2 = chi2_0 - 2*np.inner(coef,HB) + np.inner(coef,HA.dot(coef))\n\n input_number_of_templates=template_flux.shape[0]\n final_coefficients=np.zeros(input_number_of_templates)\n final_coefficients[node_template_ids]=coef\n\n log.debug(\"COORD=%s\"%coord)\n log.debug(\"COEF=%s\"%coef)\n #for i in np.where(final_coefficients>0)[0] :\n # log.debug(\"TEFF[%d]=%f\"%(i,teff[i]))\n # log.debug(\"LOGG[%d]=%f\"%(i,logg[i]))\n # log.debug(\"FEH[%d]=%f\"%(i,feh[i]))\n log.debug(\"TEFF=%f\"%np.inner(final_coefficients,teff))\n log.debug(\"LOGG=%f\"%np.inner(final_coefficients,logg))\n log.debug(\"FEH=%f\"%np.inner(final_coefficients,feh))\n log.debug(\"Contributing template Ids=%s\"%np.where(final_coefficients!=0)[0])\n\n '''\n # useful debugging plot\n import matplotlib.pyplot as plt\n plt.figure()\n ok=np.where(data_ivar>0)[0]\n ii=np.argsort(data_wave[ok])\n twave=data_wave[ok][ii]\n tflux=data_flux[ok][ii]\n tivar=data_ivar[ok][ii]\n #plt.errorbar(twave,tflux,1./np.sqrt(tivar),fmt=\"o\")\n plt.plot(twave,tflux,\".\",c=\"gray\",alpha=0.2)\n dw=np.min(twave[twave>twave[0]+0.5]-twave[0])\n bins=np.linspace(twave[0],twave[-1],(twave[-1]-twave[0])/dw+1)\n sw,junk=np.histogram(twave,bins=bins,weights=tivar)\n swx,junk=np.histogram(twave,bins=bins,weights=tivar*twave)\n swy,junk=np.histogram(twave,bins=bins,weights=tivar*tflux)\n tflux=swy[sw>0]/sw[sw>0]\n twave2=swx[sw>0]/sw[sw>0]\n terr=1./np.sqrt(sw[sw>0])\n plt.errorbar(twave2,tflux,terr,fmt=\"o\",alpha=0.5)\n model = np.zeros(data_flux.shape)\n for c,t in zip(coef,node_template_flux) :\n model += c*t\n plt.plot(twave,model[ok][ii],\"-\",c=\"r\")\n plt.show()\n '''\n\n\n return final_coefficients,chi2\n\n\ndef match_templates(wave, flux, ivar, resolution_data, stdwave, stdflux, teff, logg, feh, ncpu=1, z_max=0.005, z_res=0.00002, template_error=0):\n \"\"\"For each input spectrum, identify which standard star template is the closest\n match, factoring out broadband throughput/calibration differences.\n\n Args:\n wave : A dictionary of 1D array of vacuum wavelengths [Angstroms]. 
Example below.\n flux : A dictionary of 1D observed flux for the star\n ivar : A dictionary 1D inverse variance of flux\n resolution_data: resolution corresponding to the star's fiber\n stdwave : 1D standard star template wavelengths [Angstroms]\n stdflux : 2D[nstd, nwave] template flux\n teff : 1D[nstd] effective model temperature\n logg : 1D[nstd] model surface gravity\n feh : 1D[nstd] model metallicity\n ncpu : number of cpu for multiprocessing\n\n Returns:\n coef : numpy.array of linear coefficient of standard stars\n redshift : redshift of standard star\n chipdf : reduced chi2\n\n Notes:\n - wave and stdwave can be on different grids that don't\n necessarily overlap\n - wave does not have to be uniform or monotonic. Multiple cameras\n can be supported by concatenating their wave and flux arrays\n \"\"\"\n # I am treating the input arguments from three frame files as dictionary. For example\n # wave{\"r\":rwave,\"b\":bwave,\"z\":zwave}\n # Each data(3 channels) is compared to every model.\n # flux should be already flat fielded and sky subtracted.\n\n\n\n cameras = list(flux.keys())\n log = get_logger()\n log.debug(time.asctime())\n\n # fit continuum and save it\n continuum={}\n for cam in wave.keys() :\n tmp=applySmoothingFilter(flux[cam]) # this is fast\n continuum[cam] = tmp\n\n # mask out wavelength that could bias the fit\n\n log.debug(\"mask potential cosmics (3 sigma positive fluctuations)\")\n for cam in wave.keys() :\n ok=np.where((ivar[cam]>0))[0]\n if ok.size>0 :\n ivar[cam][ok] *= (flux[cam][ok]<(continuum[cam][ok]+3/np.sqrt(ivar[cam][ok])))\n\n\n log.debug(\"mask sky lines\")\n # in vacuum\n # mask blue lines that can affect fit of Balmer series\n # line at 5577 has a stellar line close to it !\n # line at 7853. has a stellar line close to it !\n # mask everything above 8270A because it can bias the star redshift\n # all of this is based on analysis of a few exposures of BOSS data\n # in vacuum\n skylines=np.array([4047.5,4359.3,5462.3,5578.9,5891.3,5897.3,6301.8,6365.4,7823.3,7855.2])\n\n hw=6. 
# A\n for cam in wave.keys() :\n for line in skylines :\n ivar[cam][(wave[cam]>=(line-hw))&(wave[cam]<=(line+hw))]=0.\n ivar[cam][wave[cam]>8270]=0.\n\n # mask telluric lines\n srch_filename = \"data/arc_lines/telluric_lines.txt\"\n if not resource_exists('desispec', srch_filename):\n log.error(\"Cannot find telluric mask file {:s}\".format(srch_filename))\n raise Exception(\"Cannot find telluric mask file {:s}\".format(srch_filename))\n telluric_mask_filename = resource_filename('desispec', srch_filename)\n telluric_features = np.loadtxt(telluric_mask_filename)\n log.debug(\"Masking telluric features from file %s\"%telluric_mask_filename)\n for cam in wave.keys() :\n for feature in telluric_features :\n ivar[cam][(wave[cam]>=feature[0])&(wave[cam]<=feature[1])]=0.\n\n\n\n # add error propto to flux to account for model error\n if template_error>0 :\n for cam in wave.keys() :\n ok=np.where(ivar[cam]>0)[0]\n if ok.size>0 :\n ivar[cam][ok] = 1./ ( 1./ivar[cam][ok] + (template_error*continuum[cam][ok] )**2 )\n\n # normalize data and store them in single array\n data_wave=np.array([])\n data_flux=np.array([])\n data_continuum=np.array([])\n data_ivar=np.array([])\n data_index=np.array([])\n sorted_keys = list(wave.keys())\n sorted_keys.sort() # force sorting the keys to agree with models (found unpredictable ordering in tests)\n for index,cam in enumerate(sorted_keys) :\n data_index=np.append(data_index,np.ones(wave[cam].size)*index)\n data_wave=np.append(data_wave,wave[cam])\n data_flux=np.append(data_flux,flux[cam]/(continuum[cam]+(continuum[cam]==0)))\n data_continuum=np.append(data_continuum,continuum[cam])\n data_ivar=np.append(data_ivar,ivar[cam]*continuum[cam]**2)\n data_index=data_index.astype(int)\n\n ndata = np.sum(data_ivar>0)\n\n\n # start looking at models\n\n # find canonical f-type model: Teff=6000, logg=4, Fe/H=-1.5\n canonical_model=np.argmin((teff-6000.0)**2+(logg-4.0)**2+(feh+1.5)**2)\n\n # fit redshift on canonical model\n # we use the original data to do this\n # because we resample both the data and model on a logarithmic grid in the routine\n\n if True : # mask Ca H&K lines. Present in ISM, can bias the stellar redshift fit\n log.debug(\"Mask ISM lines for redshift\")\n ismlines=np.array([3934.77,3969.59])\n hw=6. 
# A\n for cam in wave.keys() :\n for line in ismlines :\n ivar[cam][(wave[cam]>=(line-hw))&(wave[cam]<=(line+hw))]=0.\n\n z = redshift_fit(wave, flux, ivar, resolution_data, stdwave, stdflux[canonical_model], z_max, z_res)\n\n # now we go back to the model spectra , redshift them, resample, apply resolution, normalize and chi2 match\n\n ntemplates=stdflux.shape[0]\n\n # here we take into account the redshift once and for all\n shifted_stdwave=stdwave*(1+z)\n\n func_args = []\n # need to parallelize the model resampling\n for template_id in range(ntemplates) :\n arguments={\"data_wave_per_camera\":wave,\n \"resolution_data_per_camera\":resolution_data,\n \"template_wave\":shifted_stdwave,\n \"template_flux\":stdflux[template_id],\n \"template_id\":template_id}\n func_args.append( arguments )\n\n\n if ncpu > 1:\n log.debug(\"creating multiprocessing pool with %d cpus\"%ncpu); sys.stdout.flush()\n pool = multiprocessing.Pool(ncpu)\n log.debug(\"Running pool.map() for {} items\".format(len(func_args))); sys.stdout.flush()\n results = pool.map(_func, func_args)\n log.debug(\"Finished pool.map()\"); sys.stdout.flush()\n pool.close()\n pool.join()\n log.debug(\"Finished pool.join()\"); sys.stdout.flush()\n else:\n log.debug(\"Not using multiprocessing for {} cpus\".format(ncpu))\n\n results = [_func(x) for x in func_args]\n log.debug(\"Finished serial loop\")\n\n # collect results\n # in case the exit of the multiprocessing pool is not ordered as the input\n # we returned the template_id\n template_flux=np.zeros((ntemplates,data_flux.size))\n template_norm=np.zeros((ntemplates,data_flux.size))\n for result in results :\n template_id = result[0]\n template_tmp_wave = result[1]\n template_tmp_flux = result[2]\n template_tmp_norm = result[3]\n mdiff=np.max(np.abs(data_wave-template_tmp_wave)) # just a safety check\n if mdiff>1.e-5 :\n log.error(\"error indexing of wave and flux somewhere above, checking if it's just an ordering issue, max diff=%f\"%mdiff)\n raise ValueError(\"wavelength array difference cannot be fixed with reordering, ordered max diff=%f\"%mdiff)\n template_flux[template_id] = template_tmp_flux\n template_norm[template_id] = template_tmp_norm\n\n # compute model chi2\n template_chi2=np.zeros(ntemplates)\n for template_id in range(ntemplates) :\n template_chi2[template_id] = np.sum(data_ivar*(data_flux-template_flux[template_id])**2)\n\n best_model_id=np.argmin(template_chi2)\n best_chi2=template_chi2[best_model_id]\n log.debug(\"selected best model {} chi2/ndf {}\".format(best_model_id, best_chi2/ndata))\n\n # interpolate around best model using parameter grid\n coef,chi2 = interpolate_on_parameter_grid(data_wave, data_flux, data_ivar, template_flux, teff, logg, feh, template_chi2)\n log.debug(\"after interpolation chi2/ndf {}\".format(chi2/ndata))\n\n log.debug(\"use best fit to derive calibration and apply it to the templates before refitting the star ...\")\n # the division by the median filtered spectrum leaves some imprint of the input transmission\n # so we will apply calibration to the model and redo the whole fit\n # to make sure this is not driving the stellar model selection.\n\n\n log.debug(\"remultiply template by their norme\")\n template_flux *= template_norm\n\n log.debug(\"compute best fit model\")\n model=np.zeros(data_wave.size)\n for c,t in zip(coef,template_flux) :\n if c>0 : model += c*t\n\n\n func_args=[]\n for index in np.unique(data_index) :\n log.debug(\"compute calib for cam index %d\"%index)\n ii=np.where(data_index==index)[0]\n calib = 
(data_flux[ii]*data_continuum[ii])/(model[ii]+(model[ii]==0))\n scalib = applySmoothingFilter(calib,width=400)\n\n min_scalib=0.\n bad=scalib<=min_scalib\n if np.sum(bad)>0 :\n scalib[bad]=min_scalib\n\n log.debug(\"multiply templates by calib for cam index %d\"%index)\n template_flux[:,ii] *= scalib\n\n # apply this to all the templates and recompute median filter\n for t in range(template_flux.shape[0]) :\n arguments={\"template_id\":t,\"camera_index\":index,\"template_flux\":template_flux[t][ii]}\n func_args.append(arguments)\n\n if ncpu > 1:\n log.debug(\"divide templates by median filters using multiprocessing.Pool of ncpu=%d\"%ncpu)\n pool = multiprocessing.Pool(ncpu)\n results = pool.map(_func2, func_args)\n log.debug(\"finished pool.map()\"); sys.stdout.flush()\n pool.close()\n pool.join()\n log.debug(\"finished pool.join()\"); sys.stdout.flush()\n else :\n log.debug(\"divide templates serially\")\n results = [_func2(x) for x in func_args]\n log.debug(\"Finished serial loop\")\n\n # collect results\n for result in results :\n template_id = result[0]\n index = result[1]\n template_flux[template_id][data_index==index] /= (result[2] + (result[2]==0))\n\n log.debug(\"refit the model ...\")\n template_chi2=np.zeros(ntemplates)\n for template_id in range(ntemplates) :\n template_chi2[template_id] = np.sum(data_ivar*(data_flux-template_flux[template_id])**2)\n\n best_model_id=np.argmin(template_chi2)\n best_chi2=template_chi2[best_model_id]\n\n log.debug(\"selected best model {} chi2/ndf {}\".format(best_model_id, best_chi2/ndata))\n\n # interpolate around best model using parameter grid\n coef,chi2 = interpolate_on_parameter_grid(data_wave, data_flux, data_ivar, template_flux, teff, logg, feh, template_chi2)\n log.debug(\"after interpolation chi2/ndf {}\".format(chi2/ndata))\n\n\n return coef,z,chi2/ndata\n\n\ndef normalize_templates(stdwave, stdflux, mag, band, photsys):\n \"\"\"Returns spectra normalized to input magnitudes.\n\n Args:\n stdwave : 1D array of standard star wavelengths [Angstroms]\n stdflux : 1D observed flux\n mag : float desired magnitude\n band : G,R,Z,W1 or W2\n photsys : N or S (for Legacy Survey North or South)\n\n Returns:\n stdwave : same as input\n normflux : normalized flux array\n\n Only SDSS_r band is assumed to be used for normalization for now.\n \"\"\"\n log = get_logger()\n fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom\n filter_response=load_legacy_survey_filter(band,photsys)\n apMag=filter_response.get_ab_magnitude(stdflux*fluxunits,stdwave)\n scalefac=10**((apMag-mag)/2.5)\n log.debug('scaling mag {:.3f} to {:.3f} using scalefac {:.3f}'.format(apMag,mag, scalefac))\n normflux=stdflux*scalefac\n\n return normflux\n\ndef compute_flux_calibration(frame, input_model_wave,input_model_flux,input_model_fibers, nsig_clipping=10.,deg=2,debug=False,highest_throughput_nstars=0) :\n\n \"\"\"Compute average frame throughput based on data frame.(wave,flux,ivar,resolution_data)\n and spectro-photometrically calibrated stellar models (model_wave,model_flux).\n Wave and model_wave are not necessarily on the same grid\n\n Args:\n frame : Frame object with attributes wave, flux, ivar, resolution_data\n input_model_wave : 1D[nwave] array of model wavelengths\n input_model_flux : 2D[nstd, nwave] array of model fluxes\n input_model_fibers : 1D[nstd] array of model fibers\n nsig_clipping : (optional) sigma clipping level\n\n Returns:\n desispec.FluxCalib object\n calibration: mean calibration (without resolution)\n\n Notes:\n - we first resample the 
model on the input flux wave grid\n - then convolve it to the data resolution (the input wave grid is supposed finer than the spectral resolution)\n - then iteratively\n - fit the mean throughput (deconvolved, this is needed because of sharp atmospheric absorption lines)\n - compute broad band correction to fibers (to correct for small mis-alignement for instance)\n - perform outlier rejection\n\n There is one subtelty with the relation between calibration and resolution.\n - The input frame flux is on average flux^frame_fiber = R_fiber*C*flux^true where C is the true calibration (or throughput)\n which is a function of wavelength. This is the system we solve.\n - But we want to return a calibration vector per fiber C_fiber defined by flux^cframe_fiber = flux^frame_fiber/C_fiber,\n such that flux^cframe can be compared with a convolved model of the truth, flux^cframe_fiber = R_fiber*flux^true,\n i.e. (R_fiber*C*flux^true)/C_fiber = R_fiber*true_flux, giving C_fiber = (R_fiber*C*flux^true)/(R_fiber*flux^true)\n - There is no solution for this for all possible input specta. The solution for a flat spectrum is returned,\n which is very close to C_fiber = R_fiber*C (but not exactly).\n\n \"\"\"\n\n log=get_logger()\n log.info(\"starting\")\n\n # add margin to frame\n def add_margin_2d_dim1(iarray,margin) :\n shape=(iarray.shape[0],iarray.shape[1]+2*margin)\n oarray=np.zeros(shape,dtype=iarray.dtype)\n oarray[:,:margin]=iarray[:,0][:,None]\n oarray[:,margin:-margin]=iarray\n oarray[:,-margin:]=iarray[:,-1][:,None]\n return oarray\n def add_margin_3d_dim2(iarray,margin) :\n shape=(iarray.shape[0],iarray.shape[1],iarray.shape[2]+2*margin)\n oarray=np.zeros(shape,dtype=iarray.dtype)\n oarray[:,:,:margin]=iarray[:,:,0][:,:,None]\n oarray[:,:,margin:-margin]=iarray\n oarray[:,:,-margin:]=iarray[:,:,-1][:,:,None]\n return oarray\n\n margin = 3\n log.info(\"adding margin of {} pixels on each side\".format(margin))\n nwave=frame.wave.size\n dw=frame.wave[1]-frame.wave[0]\n wave_with_margin=np.zeros(nwave+2*margin)\n wave_with_margin[margin:nwave+margin]=frame.wave\n wave_with_margin[0:margin]=frame.wave[0]+dw*np.arange(-margin,0)\n wave_with_margin[nwave+margin:]=frame.wave[-1]+dw*np.arange(1,margin+1)\n tframe = copy.deepcopy(frame)\n tframe.wave = wave_with_margin\n tframe.nwave = tframe.wave.size\n tframe.flux = add_margin_2d_dim1(frame.flux,margin)\n tframe.ivar = add_margin_2d_dim1(frame.ivar,margin)\n tframe.mask = add_margin_2d_dim1(frame.mask,margin)\n tframe.resolution_data = add_margin_3d_dim2(frame.resolution_data,margin)\n tframe.R = np.array( [Resolution(r) for r in tframe.resolution_data] )\n\n #- Pull out just the standard stars for convenience, but keep the\n #- full frame of spectra around because we will later need to convolved\n #- the calibration vector for each fiber individually\n stdfibers = np.where(isStdStar(tframe.fibermap))[0]\n assert len(stdfibers) > 0\n\n if not np.all(np.in1d(stdfibers, input_model_fibers)):\n bad = set(input_model_fibers) - set(stdfibers)\n if len(bad) > 0:\n log.error('Discarding input_model_fibers that are not standards: {}'.format(bad))\n stdfibers = np.intersect1d(stdfibers, input_model_fibers)\n\n # also other way around\n stdfibers = np.intersect1d(input_model_fibers, stdfibers)\n log.info(\"Std stars fibers: {}\".format(stdfibers))\n\n stdstars = tframe[stdfibers]\n\n nwave=stdstars.nwave\n nstds=stdstars.flux.shape[0]\n\n dwave=(stdstars.wave-np.mean(stdstars.wave))/(stdstars.wave[-1]-stdstars.wave[0]) # normalized wave for polynomial fit\n\n # 
resample model to data grid and convolve by resolution\n model_flux=np.zeros((nstds, nwave))\n convolved_model_flux=np.zeros((nstds, nwave))\n\n for star in range(nstds) :\n model_flux_index = np.where(input_model_fibers == stdfibers[star])[0][0]\n model_flux[star]=resample_flux(stdstars.wave,input_model_wave,input_model_flux[model_flux_index])\n convolved_model_flux[star]=stdstars.R[star].dot(model_flux[star])\n\n input_model_flux = None # I shall not use any more the input_model_flux here\n\n # iterative fitting and clipping to get precise mean spectrum\n current_ivar=stdstars.ivar*(stdstars.mask==0)\n\n #- Start with a first pass median rejection\n calib = (convolved_model_flux!=0)*(stdstars.flux/(convolved_model_flux + (convolved_model_flux==0)))\n median_calib = np.median(calib, axis=0)\n\n # First fit of smooth correction per fiber, and 10% model error to variance, and perform first outlier rejection\n smooth_fiber_correction=np.ones((stdstars.flux.shape))\n chi2=np.zeros((stdstars.flux.shape))\n\n badfiber=np.zeros(nstds,dtype=int)\n\n for star in range(nstds) :\n if badfiber[star] : continue\n if np.sum(current_ivar[star]) == 0 :\n log.warning(\"null inverse variance for star {}\".format(star))\n badfiber[star] = 1\n continue\n\n M = median_calib*stdstars.R[star].dot(model_flux[star])\n\n try:\n ii = np.where(M>0.1*np.mean(M))[0]\n if ii.size == 0 :\n current_ivar[star]=0.\n badfiber[star] = 1\n continue\n pol=np.poly1d(np.polyfit(dwave[ii],stdstars.flux[star,ii]/M[ii],deg=deg,w=current_ivar[star,ii]*M[ii]**2))\n smooth_fiber_correction[star]=pol(dwave)\n except ValueError :\n log.warning(\"polynomial fit for star %d failed\"%star)\n current_ivar[star]=0.\n badfiber[star] = 1\n continue\n except numpy.linalg.LinAlgError :\n log.warning(\"polynomial fit for star %d failed\"%star)\n current_ivar[star]=0.\n badfiber[star] = 1\n continue\n\n # add few percent multiplicative error to ivar for sigma clipping\n chi2[star]=(current_ivar[star]>0)*(stdstars.flux[star]-smooth_fiber_correction[star]*M)**2/(1./(current_ivar[star] + (current_ivar[star]==0))+(0.1*stdstars.flux[star])**2)\n # checking indexing using mags\n #from desispec.io.filters import load_legacy_survey_filter\n #from astropy import units\n #filter_response=load_legacy_survey_filter(\"R\",\"N\")\n #fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom\n #dummy_wave = np.linspace(3000,12000,12000-3000)\n #dummy_flux = np.interp(dummy_wave,stdstars.wave,M,left=0,right=0)\n #mag = filter_response.get_ab_magnitude(dummy_flux*fluxunits,dummy_wave)\n #fmapmag = -2.5*np.log10(stdstars.fibermap[\"FLUX_R\"][star])+22.5\n #print(\"star index={} flux ratio={}\".format(star,10**(0.4*(mag-fmapmag))))\n\n bad=(chi2>nsig_clipping**2)\n current_ivar[bad] = 0\n\n sqrtw=np.sqrt(current_ivar)\n sqrtwflux=np.sqrt(current_ivar)*stdstars.flux\n\n # diagonal sparse matrices\n D1=scipy.sparse.lil_matrix((nwave,nwave))\n D2=scipy.sparse.lil_matrix((nwave,nwave))\n\n\n nout_tot=0\n previous_mean=0.\n for iteration in range(20) :\n\n # fit mean calibration\n A=scipy.sparse.lil_matrix((nwave,nwave)).tocsr()\n B=np.zeros((nwave))\n\n # loop on star to handle resolution\n for star in range(nstds) :\n if star%10==0 :\n log.info(\"iter %d star %d\"%(iteration,star))\n\n if badfiber[star]: continue\n\n R = stdstars.R[star]\n\n # diagonal sparse matrix with content = sqrt(ivar)*flat\n D1.setdiag(sqrtw[star]*smooth_fiber_correction[star])\n D2.setdiag(model_flux[star])\n sqrtwmodelR = D1.dot(R.dot(D2)) # chi2 = sum (sqrtw*data_flux 
-diag(sqrtw)*smooth_fiber_correction*R*diag(model_flux)*calib )\n\n A = A+(sqrtwmodelR.T*sqrtwmodelR).tocsr()\n B += sqrtwmodelR.T*sqrtwflux[star]\n\n if np.sum(current_ivar>0)==0 :\n log.error(\"null ivar, cannot calibrate this frame\")\n raise ValueError(\"null ivar, cannot calibrate this frame\")\n\n #- Add a weak prior that calibration = median_calib\n #- to keep A well conditioned\n minivar = np.min(current_ivar[current_ivar>0])\n log.debug('min(ivar[ivar>0]) = {}'.format(minivar))\n epsilon = minivar/10000\n A = epsilon*np.eye(nwave) + A #- converts sparse A -> dense A\n B += median_calib*epsilon\n\n log.info(\"iter %d solving\"%iteration)\n ### log.debug('cond(A) {:g}'.format(np.linalg.cond(A)))\n #calibration=cholesky_solve(A, B)\n w = np.diagonal(A)>0\n A_pos_def = A[w,:]\n A_pos_def = A_pos_def[:,w]\n calibration = B*0\n try:\n calibration[w]=cholesky_solve(A_pos_def, B[w])\n except np.linalg.linalg.LinAlgError :\n log.info('cholesky fails in iteration {}, trying svd'.format(iteration))\n calibration[w] = np.linalg.lstsq(A_pos_def,B[w])[0]\n\n wmask = (np.diagonal(A)<=0)\n if np.sum(wmask)>0 :\n wmask = wmask.astype(float)\n wmask = R.dot(R.dot(wmask))\n bad = np.where(wmask!=0)[0]\n log.info(\"nbad={}\".format(bad.size))\n good = np.where(wmask==0)[0]\n calibration[bad] = np.interp(bad,good,calibration[good],left=0,right=0)\n\n log.info(\"iter %d fit smooth correction per fiber\"%iteration)\n # fit smooth fiberflat and compute chi2\n for star in range(nstds) :\n if star%10==0 :\n log.info(\"iter %d fiber %d(smooth)\"%(iteration,star))\n\n if badfiber[star]: continue\n\n M = stdstars.R[star].dot(calibration*model_flux[star])\n\n try:\n ii = np.where(M>0.1*np.mean(M))[0]\n if ii.size == 0 :\n current_ivar[star]=0.\n badfiber[star] = 1\n continue\n pol=np.poly1d(np.polyfit(dwave[ii],stdstars.flux[star,ii]/M[ii],deg=deg,w=current_ivar[star,ii]*M[ii]**2))\n smooth_fiber_correction[star]=pol(dwave)\n except ValueError as e :\n log.warning(\"polynomial fit for star %d failed\"%star)\n current_ivar[star]=0.\n badfiber[star] = 1.\n continue\n except numpy.linalg.LinAlgError as e :\n log.warning(\"polynomial fit for star %d failed\"%star)\n current_ivar[star]=0.\n badfiber[star] = 1.\n continue\n chi2[star]=current_ivar[star]*(stdstars.flux[star]-smooth_fiber_correction[star]*M)**2\n\n log.info(\"iter {0:d} rejecting\".format(iteration))\n\n nout_iter=0\n if iteration<1 :\n # only remove worst outlier per wave\n # apply rejection iteratively, only one entry per wave among stars\n # find waves with outlier (fastest way)\n nout_per_wave=np.sum(chi2>nsig_clipping**2,axis=0)\n selection=np.where(nout_per_wave>0)[0]\n for i in selection :\n worst_entry=np.argmax(chi2[:,i])\n current_ivar[worst_entry,i]=0\n sqrtw[worst_entry,i]=0\n #sqrtwmodel[worst_entry,i]=0\n sqrtwflux[worst_entry,i]=0\n nout_iter += 1\n\n else :\n # remove all of them at once\n bad=(chi2>nsig_clipping**2)\n current_ivar *= (bad==0)\n sqrtw *= (bad==0)\n #sqrtwmodel *= (bad==0)\n sqrtwflux *= (bad==0)\n nout_iter += np.sum(bad)\n\n nout_tot += nout_iter\n\n sum_chi2=float(np.sum(chi2))\n ndf=int(np.sum(chi2>0)-nwave-nstds*2)\n chi2pdf=0.\n if ndf>0 :\n chi2pdf=sum_chi2/ndf\n\n # normalize to preserve the average throughput\n # and throughput = < data/model/correction >\n # and we would like to have throughput = < data/model >\n # (we don't do that directly to reduce noise)\n # so we want to average the inverse of the smooth correction\n mean=1./np.nanmean(1./smooth_fiber_correction[badfiber==0],axis=0)\n medcorr = 
np.median(smooth_fiber_correction,axis=1)\n log.info(\"median correction = {}\".format(medcorr))\n\n if highest_throughput_nstars > 0 :\n log.info(\"keeping {} stars with highest throughput\".format(highest_throughput_nstars))\n ii=np.argsort(medcorr)[::-1][:highest_throughput_nstars]\n log.info(\"use those fibers = {}\".format(stdfibers[ii]))\n log.info(\"with median correction = {}\".format(medcorr[ii]))\n mean=1./np.nanmean(1./smooth_fiber_correction[ii][badfiber[ii]==0],axis=0)\n else :\n mmedcorr = np.median(medcorr)\n rmscorr = 1.4*np.median(np.abs(medcorr-mmedcorr))\n log.info(\"mean rms correction = {} {}\".format(mmedcorr,rmscorr))\n bad=(np.abs(medcorr-mmedcorr)>3*rmscorr)\n if np.sum(bad)>0 :\n good=(np.abs(medcorr-mmedcorr)<=3*rmscorr)\n log.info(\"use {} stars, discarding 3 sigma outlier stars with medcorr = {}\".format(np.sum(good),medcorr[bad]))\n mean=1./np.nanmean(1./smooth_fiber_correction[good][badfiber[good]==0],axis=0)\n else :\n log.info(\"use {} stars\".format(medcorr.size))\n\n smooth_fiber_correction /= mean\n\n log.info(\"iter #%d chi2=%f ndf=%d chi2pdf=%f nout=%d mean=%f\"%(iteration,sum_chi2,ndf,chi2pdf,nout_iter,np.mean(mean)))\n\n if nout_iter == 0 and np.max(np.abs(mean-previous_mean))<0.0001 :\n break\n previous_mean = mean\n\n # smooth_fiber_correction does not converge exactly to one on average, so we apply its mean to the calibration\n # (tested on sims)\n calibration /= mean\n\n log.info(\"nout tot=%d\"%nout_tot)\n\n # solve once again to get deconvolved variance\n #calibration,calibcovar=cholesky_solve_and_invert(A.todense(),B)\n calibcovar=np.linalg.inv(A)\n calibvar=np.diagonal(calibcovar)\n log.info(\"mean(var)={0:f}\".format(np.mean(calibvar)))\n\n calibvar=np.array(np.diagonal(calibcovar))\n # apply the mean (as in the iterative loop)\n calibvar *= mean**2\n calibivar=(calibvar>0)/(calibvar+(calibvar==0))\n\n # we also want to save the convolved calibration and a calibration variance\n # first compute average resolution\n mean_res_data=np.mean(tframe.resolution_data,axis=0)\n R = Resolution(mean_res_data)\n # compute convolved calib\n ccalibration = np.zeros(tframe.flux.shape)\n for i in range(tframe.nspec):\n norme = tframe.R[i].dot(np.ones(calibration.shape))\n ok=np.where(norme>0)[0]\n if ok.size :\n ccalibration[i][ok]=tframe.R[i].dot(calibration)[ok]/norme[ok]\n\n # Use diagonal of mean calibration covariance for output.\n ccalibcovar=R.dot(calibcovar).dot(R.T.todense())\n ccalibvar=np.array(np.diagonal(ccalibcovar))\n\n # apply the mean (as in the iterative loop)\n ccalibvar *= mean**2\n ccalibivar=(ccalibvar>0)/(ccalibvar+(ccalibvar==0))\n\n # at least a few stars at each wavelength\n min_number_of_stars = min(3,max(1,nstds//2))\n nstars_with_signal=np.sum(current_ivar>0,axis=0)\n bad = (nstars_with_signal<min_number_of_stars)\n nallbad = np.sum(nstars_with_signal==0)\n # increase by 1 pixel\n bad[1:-1] |= bad[2:]\n bad[1:-1] |= bad[:-2]\n nbad=np.sum(bad>0)\n log.info(\"Requesting at least {} star spectra at each wavelength results in masking {} add. 
flux bins ({} already masked)\".format(min_number_of_stars,nbad-nallbad,nallbad))\n\n ccalibivar[bad]=0.\n ccalibration[:,bad]=0.\n\n # convert to 2D\n # For now this is the same for all fibers; in the future it may not be\n ccalibivar = np.tile(ccalibivar, tframe.nspec).reshape(tframe.nspec, tframe.nwave)\n\n # need to do better here\n mask = tframe.mask.copy()\n\n mccalibration = R.dot(calibration)\n\n # trim back\n ccalibration=ccalibration[:,margin:-margin]\n ccalibivar=ccalibivar[:,margin:-margin]\n mask=mask[:,margin:-margin]\n mccalibration=mccalibration[margin:-margin]\n stdstars.wave=stdstars.wave[margin:-margin]\n\n # return calibration, calibivar, mask, ccalibration, ccalibivar\n return FluxCalib(stdstars.wave, ccalibration, ccalibivar, mask, mccalibration)\n\n\n\nclass FluxCalib(object):\n def __init__(self, wave, calib, ivar, mask, meancalib=None):\n \"\"\"Lightweight wrapper object for flux calibration vectors\n\n Args:\n wave : 1D[nwave] input wavelength (Angstroms)\n calib: 2D[nspec, nwave] calibration vectors for each spectrum\n ivar : 2D[nspec, nwave] inverse variance of calib\n mask : 2D[nspec, nwave] mask of calib (0=good)\n meancalib : 1D[nwave] mean convolved calibration (optional)\n\n All arguments become attributes, plus nspec,nwave = calib.shape\n\n The calib vector should be such that\n\n [1e-17 erg/s/cm^2/A] = [photons/A] / calib\n \"\"\"\n assert wave.ndim == 1\n assert calib.ndim == 2\n assert calib.shape == ivar.shape\n assert calib.shape == mask.shape\n assert np.all(ivar >= 0)\n\n self.nspec, self.nwave = calib.shape\n self.wave = wave\n self.calib = calib\n self.ivar = ivar\n self.mask = util.mask32(mask)\n self.meancalib = meancalib\n\n self.meta = dict(units='photons/(erg/s/cm^2)')\n\n def __repr__(self):\n txt = '<{:s}: nspec={:d}, nwave={:d}, units={:s}'.format(\n self.__class__.__name__, self.nspec, self.nwave, self.meta['units'])\n\n # Finish\n txt = txt + '>'\n return (txt)\n\n\ndef apply_flux_calibration(frame, fluxcalib):\n \"\"\"\n Applies flux calibration to input flux and ivar\n\n Args:\n frame: Spectra object with attributes wave, flux, ivar, resolution_data\n fluxcalib : FluxCalib object with wave, calib, ...\n\n Modifies frame.flux and frame.ivar\n \"\"\"\n log=get_logger()\n log.info(\"starting\")\n\n # check same wavelength, die if not the case\n mval=np.max(np.abs(frame.wave-fluxcalib.wave))\n #if mval > 0.00001 :\n if mval > 0.001 :\n log.error(\"not same wavelength (should raise an error instead)\")\n sys.exit(12)\n\n nwave=frame.nwave\n nfibers=frame.nspec\n\n \"\"\"\n F'=F/C\n Var(F') = Var(F)/C**2 + F**2*( d(1/C)/dC )**2*Var(C)\n = 1/(ivar(F)*C**2) + F**2*(1/C**2)**2*Var(C)\n = 1/(ivar(F)*C**2) + F**2*Var(C)/C**4\n = 1/(ivar(F)*C**2) + F**2/(ivar(C)*C**4)\n \"\"\"\n # for fiber in range(nfibers) :\n # C = fluxcalib.calib[fiber]\n # flux[fiber]=frame.flux[fiber]*(C>0)/(C+(C==0))\n # ivar[fiber]=(ivar[fiber]>0)*(civar[fiber]>0)*(C>0)/( 1./((ivar[fiber]+(ivar[fiber]==0))*(C**2+(C==0))) + flux[fiber]**2/(civar[fiber]*C**4+(civar[fiber]*(C==0))) )\n\n C = fluxcalib.calib\n frame.flux = frame.flux * (C>0) / (C+(C==0))\n frame.ivar *= (fluxcalib.ivar>0) * (C>0)\n for i in range(nfibers) :\n ok=np.where(frame.ivar[i]>0)[0]\n if ok.size>0 :\n frame.ivar[i,ok] = 1./( 1./(frame.ivar[i,ok]*C[i,ok]**2)+frame.flux[i,ok]**2/(fluxcalib.ivar[i,ok]*C[i,ok]**4) )\n\n\ndef ZP_from_calib(exptime, wave, calib):\n \"\"\" Calculate the ZP in AB magnitudes given the calibration and the wavelength arrays\n Args:\n exptime: float; exposure time in seconds\n 
wave: 1D array (A)\n calib: 1D array (converts erg/s/A to photons/s/A)\n\n Returns:\n ZP_AB: 1D array of ZP values in AB magnitudes\n\n \"\"\"\n ZP_flambda = 1e-17 / (calib/exptime) # erg/s/cm^2/A\n ZP_fnu = ZP_flambda * wave**2 / (2.9979e18) # c in A/s\n # Avoid 0 values\n ZP_AB = np.zeros_like(ZP_fnu)\n gdZ = ZP_fnu > 0.\n ZP_AB[gdZ] = -2.5 * np.log10(ZP_fnu[gdZ]) - 48.6\n # Return\n return ZP_AB\n\n\ndef qa_fluxcalib(param, frame, fluxcalib):\n \"\"\"\n Args:\n param: dict of QA parameters\n frame: Frame\n fluxcalib: FluxCalib\n\n Returns:\n qadict: dict of QA outputs\n Need to record simple Python objects for yaml (str, float, int)\n\n \"\"\"\n log = get_logger()\n qadict = {}\n\n # Unpack model\n exptime = frame.meta['EXPTIME']\n\n # Standard stars\n stdfibers = np.where(isStdStar(frame.fibermap))[0]\n stdstars = frame[stdfibers]\n nstds = len(stdfibers)\n\n # Calculate ZP for mean spectrum\n #medcalib = np.median(fluxcalib.calib,axis=0)\n medcalib = np.median(fluxcalib.calib[stdfibers],axis=0)\n ZP_AB = ZP_from_calib(exptime, fluxcalib.wave, medcalib) # erg/s/cm^2/A\n\n # ZP at fiducial wavelength (AB mag for 1 photon/s/A)\n iZP = np.argmin(np.abs(fluxcalib.wave-param['ZP_WAVE']))\n qadict['ZP'] = float(np.median(ZP_AB[iZP-10:iZP+10]))\n\n # Unpack star data\n #sqrtwmodel, sqrtwflux, current_ivar, chi2 = indiv_stars\n\n # RMS\n qadict['NSTARS_FIBER'] = int(nstds)\n ZP_fiducial = np.zeros(nstds)\n\n for ii in range(nstds):\n # Good pixels\n gdp = stdstars.ivar[ii, :] > 0.\n if not np.any(gdp):\n continue\n icalib = fluxcalib.calib[stdfibers[ii]][gdp]\n i_wave = fluxcalib.wave[gdp]\n # ZP\n ZP_stars = ZP_from_calib(exptime, i_wave, icalib)\n iZP = np.argmin(np.abs(i_wave-param['ZP_WAVE']))\n ZP_fiducial[ii] = float(np.median(ZP_stars[iZP-10:iZP+10]))\n #import pdb; pdb.set_trace()\n qadict['RMS_ZP'] = float(np.std(ZP_fiducial))\n\n # MAX ZP Offset\n #stdfibers = np.where(frame.fibermap['OBJTYPE'] == 'STD')[0]\n ZPoffset = ZP_fiducial-qadict['ZP']\n imax = np.argmax(np.abs(ZPoffset))\n qadict['MAX_ZP_OFF'] = [float(ZPoffset[imax]),\n int(stdfibers[np.argmax(ZPoffset)])]\n if qadict['MAX_ZP_OFF'][0] > param['MAX_ZP_OFF']:\n log.warning(\"Bad standard star ZP {:g}, in fiber {:d}\".format(\n qadict['MAX_ZP_OFF'][0], qadict['MAX_ZP_OFF'][1]))\n # Return\n return qadict\n" ]
[ [ "numpy.sqrt", "numpy.sum", "numpy.gradient", "numpy.zeros", "numpy.interp", "numpy.inner", "numpy.argsort", "numpy.abs", "numpy.median", "numpy.max", "numpy.log10", "numpy.min", "numpy.array", "numpy.where", "numpy.unique", "numpy.mean" ], [ "numpy.ones", "numpy.sum", "scipy.ndimage.filters.median_filter", "numpy.intersect1d", "numpy.inner", "numpy.any", "numpy.argsort", "numpy.polyfit", "numpy.append", "numpy.nanmean", "numpy.argmin", "numpy.abs", "numpy.in1d", "numpy.log10", "numpy.where", "numpy.linspace", "numpy.unique", "numpy.mean", "numpy.tile", "numpy.eye", "numpy.zeros", "numpy.median", "numpy.argmax", "numpy.count_nonzero", "numpy.arange", "numpy.all", "numpy.max", "numpy.min", "scipy.sparse.lil_matrix", "numpy.std", "numpy.array", "numpy.zeros_like", "numpy.interp", "numpy.linalg.inv", "numpy.linalg.lstsq", "numpy.sqrt", "numpy.diagonal", "numpy.loadtxt" ] ]
Duane321/pyprobml
[ "6d0ba29f22dc7fec9dfc73788bc5520e97663bdb" ]
[ "examples/kmeansHeightWeight.py" ]
[ "#!/usr/bin/env python\n\nimport matplotlib.pyplot as pl\nimport numpy as np\nfrom utils import util\nfrom sklearn.cluster import KMeans\nfrom utils.util import save_fig\n\ndata = util.load_mat('heightWeight/heightWeight')\ndata = data['heightWeightData']\nmarkers = 'Dox'\ncolors = 'rgb'\n\nfor i in range(3):\n KM_model = KMeans(init='k-means++', n_clusters=i+1)\n labels = KM_model.fit_predict(data[:, [1, 2]])\n labels_unique = np.unique(labels)\n fig = pl.figure(i)\n for j in range(len(labels_unique)):\n data_chosen = data[labels == labels_unique[j]]\n pl.scatter(data_chosen[:, 1], data_chosen[:, 2],\n marker=markers[j],\n color=colors[j])\n pl.title('k = %s' % (i+1))\n save_fig('kmeansHeightWeight_%s.png' % (i+1))\npl.show()\n" ]
[ [ "matplotlib.pyplot.figure", "sklearn.cluster.KMeans", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "numpy.unique", "matplotlib.pyplot.scatter" ] ]
mhnnunes/nas_gnn
[ "91092acfee9fdbbef3e22252040b80aa96143311" ]
[ "graphnas_variants/micro_graphnas/micro_search_space.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch.nn import Module\nfrom torch_geometric.nn.conv import *\n\ngnn_list = [\n \"gat_8\", # GAT with 8 heads\n \"gat_6\", # GAT with 6 heads\n \"gat_4\", # GAT with 4 heads\n \"gat_2\", # GAT with 2 heads\n \"gat_1\", # GAT with 1 heads\n \"gcn\", # GCN\n \"cheb\", # chebnet\n \"sage\", # sage\n \"arma\",\n \"sg\", # simplifying gcn\n \"linear\", # skip connection\n \"zero\", # skip connection\n]\nact_list = [\n # \"sigmoid\", \"tanh\", \"relu\", \"linear\",\n # \"softplus\", \"leaky_relu\", \"relu6\", \"elu\"\n \"sigmoid\", \"tanh\", \"relu\", \"linear\", \"elu\"\n]\n\n\ndef act_map(act):\n if act == \"linear\":\n return lambda x: x\n elif act == \"elu\":\n return F.elu\n elif act == \"sigmoid\":\n return torch.sigmoid\n elif act == \"tanh\":\n return torch.tanh\n elif act == \"relu\":\n return torch.nn.functional.relu\n elif act == \"relu6\":\n return torch.nn.functional.relu6\n elif act == \"softplus\":\n return torch.nn.functional.softplus\n elif act == \"leaky_relu\":\n return torch.nn.functional.leaky_relu\n else:\n raise Exception(\"wrong activate function\")\n\n\ndef gnn_map(gnn_name, in_dim, out_dim, concat=False, bias=True) -> Module:\n '''\n :param gnn_name:\n :param in_dim:\n :param out_dim:\n :param concat: for gat, concat multi-head output or not\n :return: GNN model\n '''\n if gnn_name == \"gat_8\":\n return GATConv(in_dim, out_dim, 8, concat=concat, bias=bias)\n elif gnn_name == \"gat_6\":\n return GATConv(in_dim, out_dim, 6, concat=concat, bias=bias)\n elif gnn_name == \"gat_4\":\n return GATConv(in_dim, out_dim, 4, concat=concat, bias=bias)\n elif gnn_name == \"gat_2\":\n return GATConv(in_dim, out_dim, 2, concat=concat, bias=bias)\n elif gnn_name in [\"gat_1\", \"gat\"]:\n return GATConv(in_dim, out_dim, 1, concat=concat, bias=bias)\n elif gnn_name == \"gcn\":\n return GCNConv(in_dim, out_dim)\n elif gnn_name == \"cheb\":\n return ChebConv(in_dim, out_dim, K=2, bias=bias)\n elif gnn_name == \"sage\":\n return SAGEConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"gated\":\n return GatedGraphConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"arma\":\n return ARMAConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"sg\":\n return SGConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"linear\":\n return LinearConv(in_dim, out_dim, bias=bias)\n elif gnn_name == \"zero\":\n return ZeroConv(in_dim, out_dim, bias=bias)\n\n\nclass LinearConv(Module):\n def __init__(self,\n in_channels,\n out_channels,\n bias=True):\n super(LinearConv, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.linear = torch.nn.Linear(in_channels, out_channels, bias)\n\n def forward(self, x, edge_index, edge_weight=None):\n return self.linear(x)\n\n def __repr__(self):\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n\n\nclass ZeroConv(Module):\n def __init__(self,\n in_channels,\n out_channels,\n bias=True):\n super(ZeroConv, self).__init__()\n self.out_dim = out_channels\n\n def forward(self, x, edge_index, edge_weight=None):\n return torch.zeros([x.size(0), self.out_dim]).to(x.device)\n\n def __repr__(self):\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n\n\nclass SearchSpace(object):\n def __init__(self, search_space=None):\n if search_space:\n self.search_space = search_space\n else:\n self.search_space = {}\n self.search_space[\"act\"] = act_list # activate function\n self.search_space[\"gnn\"] = 
gnn_list # gnn type\n # 0 means history, 1 means current,\n # each layer contains two input\n self.search_space[\"self_index\"] = [0, 1]\n # same as self_index,\n self.search_space[\"concat_type\"] = [\"add\",\n \"product\",\n \"concat\"]\n self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]\n self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,\n 0.5, 0.6, 0.7, 0.8, 0.9]\n self.search_space['weight_decay'] = [0, 1e-3, 1e-4,\n 1e-5, 5e-5, 5e-4]\n self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]\n pass\n\n def get_search_space(self):\n return self.search_space\n\n @staticmethod\n def generate_action_list(cell=4):\n action_list = []\n for i in range(cell):\n action_list += [\"self_index\", \"gnn\"]\n action_list += [\"act\", \"concat_type\"]\n return action_list\n\n\nclass IncrementSearchSpace(object):\n def __init__(self, search_space=None, max_cell=10):\n if search_space:\n self.search_space = search_space\n else:\n self.search_space = {}\n self.search_space[\"act\"] = act_list # activate function\n self.search_space[\"gnn\"] = gnn_list # gnn type\n for i in range(max_cell):\n self.search_space[f\"self_index_{i}\"] = list(range(2 + i))\n # 0 means history, 1 means current,\n # each layer contains two input\n self.search_space[\"concat_type\"] = [\"add\",\n \"product\",\n \"concat\"]\n # same as self_index,\n self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]\n self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,\n 0.5, 0.6, 0.7, 0.8, 0.9]\n self.search_space['weight_decay'] = [0, 1e-3, 1e-4,\n 1e-5, 5e-5, 5e-4]\n self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]\n pass\n\n def get_search_space(self):\n return self.search_space\n\n @staticmethod\n def generate_action_list(cell=4):\n action_list = []\n for i in range(cell):\n action_list += [f\"self_index_{i}\", \"gnn\"]\n action_list += [\"act\", \"concat_type\"]\n return action_list\n\n\nif __name__ == \"__main__\":\n obj = IncrementSearchSpace()\n print(obj.generate_action_list())\n print(obj.get_search_space())\n" ]
[ [ "torch.nn.Linear" ] ]
NREL/PVwindow
[ "df7091c9d1ebd280aca53c50015e3b1ee7a3183e" ]
[ "tmmPCECalc.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 4 12:29:21 2021\n\n@author: aduell\n\"\"\"\n\n\n#import numpy as np\nfrom numpy import pi, linspace, array, exp\n#import tmm\nfrom tmm import inc_tmm, inc_absorp_in_each_layer, inf\n#import pandas as pd\n#import tmm_vw as tmm\n#import matplotlib.pyplot as plt\nfrom matplotlib.pyplot import plot,figure,xlabel,ylabel,show,ylim,legend\nfrom wpv import Layer, Stack\n#import scipy.interpolate, scipy.integrate, pandas, sys\nfrom scipy.interpolate import interp1d\nfrom scipy.integrate import quad, trapz\nfrom scipy.optimize import fsolve#, Bounds\nimport scipy.optimize\nfrom pandas import read_excel\nimport sys\n#import scipy\n#from numericalunits import W, K, nm, m, cm, s, eV, meV, V, mA, c0, hPlanck, kB, e, A, ohm\n#import sympy\n#import sympy.solvers.solvers\nassert sys.version_info >= (3,6), 'Requires Python 3.6+'\nfrom pvlib.pvsystem import singlediode\nimport tmmPVColor as pvc\nimport CalculateVLTFromSpectrum as cvs\nfrom CalculateVLTFromSpectrum import AM15G, cieplf\nimport vegas\n\n\n\n# This whole thing uses microns for length\n\n'''We determine the incident angle of the sun shining on the cell. Input is in degrees'''\ndef giveincangle(angle):\n degree = pi/180\n return angle*degree\ninc_angle = giveincangle(0) \n'''We determine the size and scaling of the photon wavelength scale. Units are um''' \nnum_lams = 500\nlams = linspace(0.3,2.5,num=num_lams) #um\n\n'''We are constants and help control units'''\nq = 1.602176634e-19 #coulombs. elementary charge \nc0 = 299792458 #m/s #Speed of light\nhPlanck = 6.62607015e-34 #J*s 4.135667516e-15 #eV*s \nkB = 1.380649e-23 #J/K 8.61733034e-5 #eV/K \n\n'''Some units and terms'''\n'''Tcell, Ti, To are cell temperature, inside temp and outside temp. Always in kelvin'''\n'''Ui and Uo are overall heat-transfer coefficient ofr in side and outside. W/(m**2 *K)'''\n'''AbsorberLayer is a number indicating the photoactive layer. If the fourth layer is the PV layer, input is 4'''\n''''Rs is series resistance, Rsh is shunt resistance in ohms. See pveducation.org for more info''' \n'''eta is the electron-hole pair extraction efficiency term. eta times all absorbed light in the PV layer gives the EQE'''\n'''n = diode ideality factor. Used in singlediode equation\nNs = number of cells in series. 
Used in singlediode equation'''\n'''Rtot is total thermal resistance of the window'''\n\n\n\n\n\n'''We are all the different materials currently available\nThickness is in microns'''\ndef Glass(Thickness = 6000):\n return Layer(Thickness,'nkLowFeGlass','i')\ndef TiO2(Thickness = 0.050):\n return Layer(Thickness,'nkTiO2','c')\ndef FTO(Thickness = 0.250):\n return Layer(Thickness,'nkFTO','c')\ndef MAPI(Thickness = 0.130): \n return Layer(Thickness,'nkMAPI','c')\ndef AZO(Thickness = 0.200):\n return Layer(Thickness,'nkAZO','c')\ndef ITO(Thickness = 0.200):\n return Layer(Thickness,'nkITO','c')\ndef ITOlowE(Thickness = 0.075):\n return Layer(Thickness,'nkITO','c')\ndef SnO2(Thickness = 0.05):\n return Layer(Thickness,'nkSnO2','c')\ndef SnO2lowE(Thickness = 0.030):\n return Layer(Thickness,'nkSnO2','c')\ndef SnO2lowEfat(Thickness = 0.050):\n return Layer(Thickness,'nkSnO2','c')\ndef SiO2(Thickness = 0.024):\n return Layer(Thickness,'nkSiO2','c')\ndef NiO(Thickness = 0.050):\n return Layer(Thickness,'nkNiO','c')\ndef Ag(Thickness = 0.015):\n return Layer(Thickness,'nkAg','c')\ndef TiO2lowE(Thickness = 0.030):\n return Layer(Thickness,'nkTiO2','c')\ndef TiO2lowEfat(Thickness = 0.060):\n return Layer(Thickness,'nkTiO2','c')\ndef Bleach(Thickness = 0.370):\n return Layer(Thickness,'nkBleach','c')\ndef ClAlPc(Thickness = 0.300):\n return Layer(Thickness,'nkClAlPc','c')\ndef C60(Thickness = 0.200):\n return Layer(Thickness,'nkC60','c')\ndef IR(Thickness = 0.060):\n return Layer(Thickness,'nkPTB7_ThIEICO_4F','c')\ndef MAPBr(Thickness = 0.500):\n return Layer(Thickness,'nkMAPbBr3','c')\ndef EVA(Thickness = 3000):\n return Layer(Thickness,'nkEVA','i')\n\n\n'''We are boundary conditions corresponding to each material type\nCan be changed to tune optimization range'''\nGlassBound = (5999,6001)\nTiO2Bound = (0.025,.1)\nFTOBound = (0.1,0.5)\nMAPIBound = (.06,.260)\nAZOBound = (.1,.4)\nITOBound = (.1,.4)\nITOlowEBound = (0.03,.15)\nSnO2Bound = (.025,.1)\nSnO2lowEBound = (.015,.06)\nSnO2lowEfatBound = (0.025,.1)\nSiO2Bound = (.012,.05)\nNiOBound = (.025,.1)\nAgBound = (.0149, .0151)\nTiO2lowEBound = (.015, .070)\nTiO2lowEfatBound = (.03,.12)\nBleachBound = (.180, .500)\nClAlPcBound = (.150, .600)\nC60Bound = (.100,.400)\nIRBound = (.030, .12)\nMAPBrBound = (.250,1)\nEVABound = (2999,3001)\n\n\n'''I assemble a list of layer objects using Thicknesses and Materials''' \ndef GiveLayers(Thickness,Materials):\n x = len(Materials)\n if x == len(Thickness):\n Layers = []\n for i in range(x):\n Layers.append(Materials[i](Thickness[i]))\n return Layers\n else: \n raise ValueError ('layers and Thickness lengths do not match')\n\n'''I give a list of boundaries from a list of materials. 
Dict is a dictionary containing the boundary conditions\nAll items in the dicitonary are labelled as 'Material'+'Bound' '''\n'''\ndef GiveBounds(Materials, DictBound):\n x = len(Materials)\n Bounds = []\n for i in range(x):\n Bounds.append(DictBound[Materials[i].__name__ + 'Bound'])\n Bounds = array(Bounds)\n return Bounds\n'''\n\n'''I produce a Bounds object that defines the boundary conditions for optimization\nThe version above can be used to produce a list of bounds rather than an object'''\n\ndef GiveBounds(Materials, DictBound):\n x = len(Materials)\n lb = []\n ub = []\n for i in range(x):\n lb.append(DictBound[Materials[i].__name__ + 'Bound'][0])\n for i in range(x):\n ub.append(DictBound[Materials[i].__name__ + 'Bound'][1])\n bounds = scipy.optimize.Bounds(lb,ub)\n return bounds\n\n'''I give a list of thicknesses from a list of materials. Dict is a dictionary containing the thickness values\nAll items in the dicitonary are labelled as 'Material'+'Th' '''\ndef GiveThicks(Materials, DictTh):\n x = len(Materials)\n Th = []\n for i in range(x):\n Th.append(DictTh[Materials[i].__name__ + 'Th'])\n return Th\n\n'''Calculates Spectra Based on the layers of the cell\nAbsorberLayer is an integer giving the position of the PV layer in the stack. Currently supports 1 PV layer'''\ndef Spectra(layers, AbsorberLayer):\n thicks = [inf]\n iorcs = ['i']\n for layer in layers:\n thicks.append(layer.d)\n iorcs.append(layer.i_or_c)\n thicks.append(inf)\n iorcs.append('i')\n \n \n thicks_bw = thicks[::-1]\n iorcs_bw = iorcs[::-1]\n\n Ts = []\n Rfs = []\n Rbs = []\n AbsByAbsorbers = []\n #EQEs2 = []\n #IREQEs = []\n\n\n layerchoice = AbsorberLayer \n #layerchoice2 = 5\n\n for lam in lams:\n\n nks = [1]\n for layer in layers:\n nks.append(layer.nk(lam))\n nks.append(1)\n \n nks_bw = nks[::-1]\n \n front_spol = inc_tmm('s',nks,thicks,iorcs,inc_angle,lam)\n front_ppol = inc_tmm('p',nks,thicks,iorcs,inc_angle,lam)\n back_spol = inc_tmm('s',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)\n back_ppol = inc_tmm('p',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)\n \n AbsByAbsorber_spol = inc_absorp_in_each_layer(front_spol)[layerchoice]\n AbsByAbsorber_ppol = inc_absorp_in_each_layer(front_ppol)[layerchoice]\n \n AbsByAbsorbers.append( (AbsByAbsorber_spol + AbsByAbsorber_ppol) / 2. )\n \n # EQE_spol2 = tmm.inc_absorp_in_each_layer(front_spol)[layerchoice2]\n # EQE_ppol2 = tmm.inc_absorp_in_each_layer(front_ppol)[layerchoice2]\n \n # EQEs2.append( (EQE_spol2 + EQE_ppol2) / 2. )\n \n Rfs.append( (front_spol['R']+front_ppol['R']) / 2.)\n Rbs.append( (back_spol['R']+back_ppol['R']) / 2.)\n Ts.append( (front_spol['T']+front_ppol['T']) / 2. )\n\n\n Ts = array(Ts)\n Rfs = array(Rfs)\n Rbs = array(Rbs)\n As = 1-Ts-Rfs\n sanities = Ts+Rfs+As\n\n AbsByAbsorbers = array(AbsByAbsorbers)\n Spectra = {'AbsByAbsorbers':AbsByAbsorbers, 'Ts':Ts,'Rfs':Rfs,'Rbs':Rbs,'As':As,'Total':sanities}\n return Spectra\n\n\n\n\n''' Here I calculate VLT and spit it out to the screen'''\n\n'''Gives a spectrum of VLT. Used for plotting'''\ndef VLTSpectrum(layers):\n return Stack(layers)\n'''Gives VLT as a single number'''\ndef VLT(layers):\n VLTstack=Stack(layers)\n return VLTstack.get_visible_light_transmission(lams,inc_angle)\n\n'''This gives VLT as a single number. eliminates\nneed to recalculate AM15G and cieplf every iteration. 
Unclear if this will work for \noptimization'''\ndef getFancyVLT(layers):#,lamrange,inc_angle):\n integ = vegas.Integrator([lams])\n Trans=Stack(layers)\n numerator = integ(lambda lam: AM15G(lam)*cieplf(lam)*Trans.get_RAT(lam,inc_angle)[2], nitn=10, neval=100)[0]\n denominator = integ(lambda lam: AM15G(lam)*cieplf(lam), nitn=10, neval=100)[0]\n VLT = numerator/denominator\n return VLT.mean\n\n'''Gives minimum and maximum VLT based exclusively on the PV layer. \nOnly useful for judging VLT constraint for a given PV material\nRequires input of single absorber layer with a tuple of (lb,ub)'''\ndef GiveMinMaxVLT(AbsorberType, Bounds):\n minThick = GiveLayers([Bounds[0]], [AbsorberType]) \n maxThick = GiveLayers([Bounds[1]], [AbsorberType])\n minimum = VLT(maxThick)\n maximum = VLT(minThick)\n return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],\n 'maxThick':Bounds[1]}\n\n'''Gives minimum and maximum VLT based exclusively on the PV layer. \nRequires list of materials, absorbing layer, and absorber bounds'''\ndef GiveMinMaxVLTFromMaterials(Materials, AbsorberLayer, Bounds):\n AbsorberType = Materials[AbsorberLayer-1]\n minThick = GiveLayers([Bounds[0]], [AbsorberType]) \n maxThick = GiveLayers([Bounds[1]], [AbsorberType])\n minimum = VLT(maxThick)\n maximum = VLT(minThick)\n return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],\n 'maxThick':Bounds[1]}\n\n\n\n# ******************** Here I add PCE calculation *********************#\n \n'''This stuff imports a spreadsheet of the solar spectrum'''\n#worksheet = pandas.read_excel('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')\nworksheet = read_excel('./Data/ASTMG173.xls')#('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')\n#worksheet = pandas.read_excel('/Users/lwheeler/Code/pv-window-bem/Data/astmg173.xls')\ndownloaded_array = array(worksheet)\n\n# Wavelength is in column 0, AM1.5G data is column 2\nAM15 = downloaded_array[1:, [0,2]]\n\n# The first line should be 280.0 , 4.7309E-23\n# The last line should be 4000.0, 7.1043E-03\n# print(AM15)\n\n# Interpolate to get a continuous function which I will be able to do integrals on:\n'''Interpolated solar spectrum\nwhen using, inputs must be within 300-2500 nm'''\nAM15interp = interp1d(AM15[:,0]/1000, AM15[:,1])\n\n\n# Here’s the plot, it looks correct:\n'''Plot of the solar spectrum for verification'''\n'''\ny_values = np.array([AM15interp(x) for x in lams])\nfigure()\nplot(lams , y_values)\nxlabel(\"Wavelength (nm)\")\nylabel(\"Spectral intensity (W/m$^2$/nm)\")\ntitle(\"Light from the sun\");\nshow()\n'''\n\n\n'''I convert wavelength to energy. E_min and max are used for integration limits '''\nEphoton = hPlanck * c0 / lams *1e6 #J\nE_min = min(Ephoton) #J energy units from hPlanck\nE_max = max(Ephoton) #J energy units from hPlanck\n\n\n'''I give the number of photons per......'''\ndef SPhotonsPerTEA(Ephoton):\n λ = hPlanck * c0 / Ephoton *1e6 #um\n return AM15interp(λ) * (1 / Ephoton) * (hPlanck * c0 / Ephoton**2) * 1e9\n'''I give the power for each......'''\ndef PowerPerTEA(Ephoton):\n return Ephoton * SPhotonsPerTEA(Ephoton)\n'''I give the solar constant which is the W/m*2 emitted by the sun. 
Should be ~1000'''\ndef Solar_Constant(Ephoton):\n #PowerPerTEA = lambda E : E * SPhotonsPerTEA(E)\n return quad(PowerPerTEA,E_min,E_max, full_output=1)[0]\n# quad() is ordinary integration; full_output=1 is (surprisingly) how you hide\n# the messages warning about poor accuracy in integrating.\n'''This is the solar constant value. It is called by optimization and used in a variety of functions here\nShould always be ~1000'''\nsolar_constant = Solar_Constant(Ephoton)\n\n'''I return an interpolated function of a spectrum relative to photon wavelength. Used for plotting'''\ndef GivelamsInterp(Parameter):\n Curve = Parameter.round(8)\n return interp1d(lams, Curve)\n\n'''I return an interpolated function of a spectrum relative to photon energy'''\ndef GiveEInterp(Parameter):\n Curve = Parameter.round(8)\n return interp1d(Ephoton, Curve)\n\n'''I give Q based on a given spectrum. Units are W/m^2\nInput is a spectrum interpolated with respect to energy, E\neta should only be used if looking at a PV layer. Otherwise it is set to 1'''\ndef GiveQ(Spectra, eta = 1):#Spectra must be an interpolated function\n def integrand(E):\n return eta * Spectra(E) * PowerPerTEA(E)\n return quad(integrand, E_min, E_max, full_output=1)[0] \n \n'''\n#trapz calcs\ndef GiveQ(Spectra, eta = 1):#Spectra must be an array\n integrand = eta*Spectra*PowerPerTEA(Ephoton)\n return -np.trapz(integrand, Ephoton) \n'''\n\n'''\ndef GivePhotons(Spectra, eta):#Spectra must be an interpolated function\n def integrand(E):\n return eta * Spectra(E) * SPhotonsPerTEA(E)\n return quad(integrand, E_min, E_max)[0] \n'''\n# Here I input the spectrum of photons absorbed by the absorber material (Absorbed)\n# and the electron-hole pair extraction efficiency (eta). EQE = eta * Absorbed\n\n'''I give the rate of recombination for the solar cell, Units are photons/(s*m**2)'''\ndef RR0(eta,Absorbed,Tcell):\n integrand = lambda E : eta * Absorbed(E) * (E)**2 / (exp(E / (kB * Tcell)) - 1)\n integral = quad(integrand, E_min, E_max, full_output=1)[0]\n return ((2 * pi) / (c0**2 * hPlanck**3)) * integral# / 1.60218e-19 #J/eV\n#units = photons/(s*m**2)\n\n'''I give the amount of energy converted to electricity in terms of photons, units are photons(s/m**2)'''\ndef Generated(eta,Absorbed):\n integrand = lambda E : eta * Absorbed(E) * SPhotonsPerTEA(E)\n# integral = quad(integrand, E_min, E_max, full_output=1)[0]\n return quad(integrand, E_min, E_max, full_output=1)[0]\n#units photons/(s*m**2)\n'''\n#Using trapezoidal rule for integration instaed of quad\n#AbsByAbsorbers is an aray of intensities, not an interpolated function.\ndef RR0(eta,Absorbed,Tcell):\n AbsByAbsorbers = AbsByAbsorbers.round(8)\n integrand = eta * AbsByAbsorbers * (Ephoton)**2 / (np.exp(Ephoton / (kB * Tcell)) - 1)\n integral = trapz(integrand, Ephoton)\n return ((2 * np.pi) / (c0**2 * hPlanck**3)) * integral\n\ndef Generated(eta,Absorbed):\n Absorbed = Absorbed.round(8)\n integrand = eta * Absorbed * SPhotonsPerTEA(Ephoton)\n# integral = quad(integrand, E_min, E_max, full_output=1)[0]\n return np.trapz(integrand, Ephoton)\n'''\n\n'''I use the single diode equation to return the max power of the cell in watts\nCheck PVlib documentation for details'''\ndef Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell, n = 1, Ns = 1):\n data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed,Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)\n return data['p_mp']\n\n'''I calculate equilibrium tmperature of the cell assuming the cell is infinitely thin\nTotalAbs is the full absorptance of the 
stack as an array of intensities, uninterpolated. \nAbsorbed is PV layer absorptance interpolated\nTemperature calculation is implicit so the numerical solver fsolve is used.\nThis equation is derived from Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows'''\ndef TcellCalc(TotalAbs, eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh):\n AbsTotal = GiveEInterp(TotalAbs)\n Qabs = GiveQ(AbsTotal)\n Temp = lambda Tcell: (Qabs - Give_Pmp(eta,Absorbed,Rs,Rsh, Tcell) + Ui*Ti + Uo*To)/(Ui + Uo)-Tcell\n return fsolve(Temp, 300)[0]\n\n\n'''I use the single diode equation to produce an IV curve and power plot\nI also return related values such as Voc, Isc, and Pmp in units volts, amps, and watts\nSee pvlib singlediode equation for more information'''\ndef GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1):\n data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed, Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)\n\n Isc = data['i_sc']\n Voc = data['v_oc']\n Imp = data['i_mp']\n Vmp = data['v_mp']\n Pmp = data['p_mp']\n Vvalues = array(data['v'])\n Ivalues = array(data['i'])\n #print('Isc = ', Isc, ', Voc = ', Voc, ', Imp = ', Imp, ', Vmp = ', Vmp, ', Pmp =', Pmp)\n\n figure()\n plot(Vvalues,Ivalues, label = 'IV')\n xlabel('Voltage, (V)')\n ylabel('Current (A) or Power (W/m^2)')\n ylabel('Power (W/m^2)')\n P_values = array([Ivalues * Vvalues])\n plot(Vvalues , P_values.T, label = 'Power')\n ylim(-1, 150)\n legend(loc = 'upper right')\n show()\n return data\n\n\n\n'''I give the solar heat gain coefficient. unitless numebr between 0 and 1\nTs is the transmission spectra. Must be a list of intensities, not an interpolated function\nThis equation comes form a combination of Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows\nand equation 3.18 from Fundamentals of Heat and Mass Transfer 6ed Incropera'''\ndef SHGC(Ts, Ti, To, Tcell, Ui):\n #Tcell = TcellCalc(As,Ti,To,eta,Absorbed)\n Rtot = 1/Ui #This is approximate because Ui is assumed\n #Included in GiveQ for simplicity but should not be used for calculating SHGC\n TransTotal = GiveEInterp(Ts)\n Qtrans = GiveQ(TransTotal,1)\n return (Qtrans + Ui*(Tcell-Ti) - ((To-Ti)/Rtot))/solar_constant\n\n'''I give max efficiency also called PCE'''\n'''Absorbed must be an interpolated function of the absorption spectrum of the PV layer'''\ndef max_efficiency(eta,Absorbed,Tcell, Rs, Rsh):\n #Tcell = TcellCalc(As,Ti,To,eta,Absorbed)\n return Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell) / solar_constant\n\n'''I give important info about a solar cell such as PCE, SHGC, Temperature, etc'''\ndef GiveImportantInfo(Thickness, Materials,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle=0):\n global inc_angle\n inc_angle = giveincangle(Angle)\n \n layers = GiveLayers(Thickness,Materials)\n \n spectra = Spectra(layers ,AbsorberLayer)\n AbsByAbsorbers = spectra['AbsByAbsorbers']\n Ts = spectra['Ts']\n Rfs = spectra['Rfs']\n Rbs = spectra['Rbs']\n As = spectra['As']\n sanities = spectra['Total']\n Absorbed = GiveEInterp(AbsByAbsorbers)\n VLTcalc = cvs.getVLT(Ts,lams)#VLT(layers)\n Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)\n #Absorbed = tpc.GiveEInterp(tpc.Spectra(tpc.GiveLayers(Thickness, Materials),4)['AbsByAbsorbers'])\n data = GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1)\n Isc = data['i_sc']\n Voc = data['v_oc']\n Imp = data['i_mp']\n Vmp = data['v_mp']\n Pmp = data['p_mp']\n SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)\n PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)\n\n\n #Spectral Curves\n figure()\n 
plot(lams,Rfs,color='magenta',marker=None,label=\"$R_f$\")\n plot(lams,Ts,color='green',marker=None,label=\"$T$\")\n plot(lams,Rbs,color='purple',marker=None,label=\"$R_b$\")\n plot(lams,As,color='black',marker=None,label=\"A\")\n plot(lams,AbsByAbsorbers,color='black',linestyle='--',marker=None,label=\"AbsByAbsorber\")\n plot(lams,sanities,color='gold',marker=None,label=\"R+A+T\")\n plot(lams,VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label=\"photopic\")\n xlabel('wavelength, $\\mu$m')\n ylabel('Intensity')\n legend(loc = 'upper right')\n show()\n \n EphotoneV = Ephoton*6.241509e+18 \n figure()\n plot(EphotoneV, Ts, color='magenta',marker=None,label=\"$T$\")\n plot(EphotoneV, Rfs,color='green',marker=None,label=\"$R_f$\")\n plot(EphotoneV, Rbs,color='orange',marker=None,label=\"$R_b$\")\n plot(EphotoneV, AbsByAbsorbers,color='black',marker=None,label=\"Abs\")\n #plot(Ephoton,tpc.VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label=\"photopic\")\n legend(loc = 'upper right')\n xlabel('Energy, eV')\n ylabel('Intensity')\n show()\n\n pvc.GiveColorSwatch(Ts, Rfs)\n pvc.plot_xy_on_fin(Ts, Rfs)\n\n print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)#,'time to calculate PCE from scratch in seconds = ', TimePCE, 'Time to run optimizer in minutes = ',TimeOptimize/60)\n return {'PCE':PCE, 'VLT':VLTcalc, 'SHGC':SHGCcalc, 'Tcell':Tcell,'Isc':Isc, 'Voc': Voc, 'Imp': Imp, 'Vmp': Vmp,'Pmp': Pmp}\n" ]
[ [ "scipy.integrate.quad", "matplotlib.pyplot.legend", "scipy.interpolate.interp1d", "scipy.optimize.fsolve", "matplotlib.pyplot.figure", "pandas.read_excel", "numpy.exp", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "numpy.array", "matplotlib.pyplot.plot", "numpy.linspace", "matplotlib.pyplot.xlabel" ] ]
zelhar/mg21
[ "f8392aba7deb63aa85f3d137ef81dea1bb742b41" ]
[ "demoNN.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset, TensorDataset\nfrom torchvision import datasets\nfrom torchvision.transforms import ToTensor, Lambda, Compose\nimport matplotlib.pyplot as plt\n\nimport torch.distributions as D\n\nimport torch.nn.functional as F\n\n# Download training data from open datasets.\ntraining_data = datasets.FashionMNIST(\n root=\"data\",\n train=True,\n download=True,\n transform=ToTensor(),\n)\n\n# Download test data from open datasets.\ntest_data = datasets.FashionMNIST(\n root=\"data\",\n train=False,\n download=True,\n transform=ToTensor(),\n)\n\nbatch_size = 64\n\n# Create data loaders.\ntrain_dataloader = DataLoader(training_data, batch_size=batch_size)\ntest_dataloader = DataLoader(test_data, batch_size=batch_size)\n\nfor X, y in test_dataloader:\n print(\"Shape of X [N, C, H, W]: \", X.shape)\n print(\"Shape of y: \", y.shape, y.dtype)\n break\n\n# testing synthetic dataset\nx = torch.randn((100,3,28,28))\n\nd = TensorDataset(x)\n\nz = d.__getitem__(2) # retuns 1-tuple of tensor (no label) \nz[0].shape\n\n# with labels\ny = torch.randint(low=0, high=1, size=(100,))\nd = TensorDataset(x,y)\nz = d.__getitem__(2) # retuns 1-tuple of tensor (no label) \nz[0].shape\nz[1].shape\n\n# Get cpu or gpu device for training.\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(\"Using {} device\".format(device))\n\n# Define model\nclass NeuralNetwork(nn.Module):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(28*28, 512),\n nn.ReLU(),\n nn.Linear(512, 512),\n nn.ReLU(),\n nn.Linear(512, 10)\n )\n\n def forward(self, x):\n x = self.flatten(x)\n logits = self.linear_relu_stack(x)\n return logits\n\nmodel = NeuralNetwork().to(device)\nprint(model)\n\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n\ndef train(dataloader, model, loss_fn, optimizer):\n size = len(dataloader.dataset)\n model.train()\n for batch, (X, y) in enumerate(dataloader):\n X, y = X.to(device), y.to(device)\n\n # Compute prediction error\n pred = model(X)\n loss = loss_fn(pred, y)\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch % 100 == 0:\n loss, current = loss.item(), batch * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n\ndef test(dataloader, model, loss_fn):\n size = len(dataloader.dataset)\n num_batches = len(dataloader)\n model.eval()\n test_loss, correct = 0, 0\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.to(device), y.to(device)\n pred = model(X)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n test_loss /= num_batches\n correct /= size\n print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n\nepochs = 5\nfor t in range(epochs):\n print(f\"Epoch {t+1}\\n-------------------------------\")\n train(train_dataloader, model, loss_fn, optimizer)\n test(test_dataloader, model, loss_fn)\nprint(\"Done!\")\n\n\n\nbce = nn.BCELoss(reduction=\"none\")\nx = torch.tensor(0.5)\ny = torch.tensor(0.7)\nbce(x,y)\n\nf = lambda x, y: y * torch.log(x) + (1-y) * torch.log(1-x)\nf(x,y)\n\n\ntorch.softmax(torch.tensor([1,2,3]), 0, torch.float64)\n\n# generate mixed distributions\nm = D.OneHotCategorical(torch.tensor([1,2,3,6]))\nm.sample()\nm.sample_n(10)\nm.sample((3,4))\n\nm = D.Normal(torch.tensor([0,10.0]), 
torch.tensor([1.0,2]))\n\nm.sample((3,4))\n\n# Example of target with class indices\nloss = nn.CrossEntropyLoss()\n\ninput = torch.randn(3, 5, requires_grad=True)\ntarget = torch.empty(3, dtype=torch.long).random_(5)\noutput = loss(input, target)\noutput.backward()\n\n# Example of target with class probabilities\ninput = torch.randn(3, 5, requires_grad=True)\ntarget = torch.randn(3, 5).softmax(dim=1)\noutput = loss(input, target)\noutput.backward()\n\ninput = torch.randn((3, 2), requires_grad=True)\ntarget = torch.rand((3, 2), requires_grad=False)\nloss = F.binary_cross_entropy(F.sigmoid(input), target)\nloss.backward()\n\nloss = nn.BCELoss(reduction=\"none\")\nx = torch.tensor([0,0.25,0.5,0.75,1])\nF.binary_cross_entropy(x,x,reduction=\"none\")\nloss(x,x)\n\nx = torch.tensor([0,0.25,0.5,0.75,1])\ny = torch.tensor([0,0.25,0.5,0.75,1])\nloss(x,y)\n" ]
[ [ "torch.empty", "torch.utils.data.DataLoader", "torch.nn.functional.sigmoid", "torch.randint", "torch.nn.Flatten", "torch.nn.Linear", "torch.randn", "torch.rand", "torch.no_grad", "torch.tensor", "torch.nn.CrossEntropyLoss", "torch.cuda.is_available", "torch.log", "torch.nn.BCELoss", "torch.utils.data.TensorDataset", "torch.nn.ReLU", "torch.nn.functional.binary_cross_entropy" ] ]
ajrcampbell/pyro
[ "37680e6d08f20cda95729427143f17875484b21d" ]
[ "pyro/distributions/reflected.py" ]
[ "from torch.distributions import constraints\nfrom torch.distributions.transforms import AbsTransform\n\nfrom pyro.distributions.torch import TransformedDistribution\n\n\nclass ReflectedDistribution(TransformedDistribution):\n \"\"\"\n Equivalent to ``TransformedDistribution(base_dist, AbsTransform())``,\n but additionally supports :meth:`log_prob` .\n\n :param ~torch.distributions.Distribution base_dist: The distribution to\n reflect.\n \"\"\"\n support = constraints.positive\n\n def __init__(self, base_dist, validate_args=None):\n if base_dist.event_shape:\n raise ValueError(\"Only univariate distributions can be reflected.\")\n super().__init__(base_dist, AbsTransform(), validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(type(self), _instance)\n return super().expand(batch_shape, _instance=new)\n\n def log_prob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n dim = max(len(self.batch_shape), value.dim())\n plus_minus = value.new_tensor([1., -1.]).reshape((2,) + (1,) * dim)\n return self.base_dist.log_prob(plus_minus * value).logsumexp(0)\n" ]
[ [ "torch.distributions.transforms.AbsTransform" ] ]
psemdel/py-trading-bot
[ "69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019" ]
[ "bot/orders/models.py" ]
[ "from django.db import models\nfrom django.utils import timezone\nfrom django.db.models import Q\n\nimport asyncio\nfrom ib_insync import IB, Stock, MarketOrder, util\nfrom core.common import empty_append\nfrom core.indicators import rel_dif\n\nimport vectorbtpro as vbt\nimport sys\nimport math\n\nimport pandas as pd\nimport numpy as np\n\nfrom trading_bot.settings import (PERFORM_ORDER, USE_IB_FOR_DATA,DIC_PERFORM_ORDER,\n IB_LOCALHOST, IB_PORT)\n\n### Interactive brockers and data retrieval ###\n'''\nContains:\n- Communication with Interactive brokers\n- Retrieval of live data (Interactive brokers or YFinance)\n- Performing order\n- Models for financial products, stock exchanges...\n\nNote: for some reasons, it does not work if myIB class is not in models\n'''\n\n## All symbols must be from same stock exchange\ndef retrieve_data(symbols,period,**kwargs):\n try:\n IBok=True\n for symbol in symbols:\n if kwargs.get(\"index\",False):\n action=Index.objects.get(symbol=symbol)\n else:\n action=Action.objects.get(symbol=symbol)\n \n if action.stock_ex.ib_ticker in [\"BVME.ETF\"]:\n IBok=False\n break\n \n index_symbol=exchange_to_symbol(action)\n \n if (USE_IB_FOR_DATA and IBok) or kwargs.get(\"useIB\",False): \n fig= ''.join(x for x in period if x.isdigit())\n if period.find(\"d\")!=-1:\n period_ib=fig +\" D\"\n elif period.find(\"mo\")!=-1:\n period_ib=fig +\" M\"\n elif period.find(\"y\")!=-1:\n period_ib=fig +\" Y\" \n \n #Time period of one bar. Must be one of: ‘1 secs’, ‘5 secs’, ‘10 secs’ 15 secs’, ‘30 secs’, ‘1 min’, ‘2 mins’, ‘3 mins’, ‘5 mins’, ‘10 mins’, ‘15 mins’, ‘20 mins’, ‘30 mins’, ‘1 hour’, ‘2 hours’, ‘3 hours’, ‘4 hours’, ‘8 hours’, ‘1 day’, ‘1 week’, ‘1 month’.\n if kwargs.get(\"interval\",False):\n fig= ''.join(x for x in kwargs.get(\"interval\") if x.isdigit())\n if period.find(\"m\")!=-1:\n interval=fig +\" mins\"\n elif period.find(\"h\")!=-1:\n interval=fig +\" hours\"\n elif period.find(\"d\")!=-1:\n interval=fig +\" day\"\n else:\n interval='1 day'\n \n open_=[]\n close=[]\n low=[]\n high=[]\n \n myIB=MyIB()\n for symbol in symbols:\n action=Action.objects.get(symbol=symbol)\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n bars = myIB.ib.reqHistoricalData(\n contract,\n endDateTime='',\n durationStr=period_ib, #\"10 D\",\"1 M\"\n barSizeSetting=interval, #\"1 day\", \"1 min\"\n whatToShow='TRADES',\n useRTH=True,\n formatDate=1)\n \n df=util.df(bars)\n open_=empty_append(open_,df[\"open\"].values,axis=1)\n close=empty_append(close,df[\"close\"].values,axis=1)\n high=empty_append(high,df[\"high\"].values,axis=1)\n low=empty_append(low,df[\"low\"].values,axis=1)\n volume=empty_append(low,df[\"volume\"].values,axis=1)\n \n cours_open=pd.DataFrame(data=open_,index=df[\"date\"],columns=symbols)\n cours_close=pd.DataFrame(data=close,index=df[\"date\"],columns=symbols)\n cours_low=pd.DataFrame(data=low,index=df[\"date\"],columns=symbols)\n cours_high=pd.DataFrame(data=high,index=df[\"date\"],columns=symbols)\n cours_volume=pd.DataFrame(data=volume,index=df[\"date\"],columns=symbols)\n \n action=Action.objects.get(symbol=index_symbol)\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n bars = myIB.ib.reqHistoricalData(\n contract,\n endDateTime='',\n durationStr=period_ib, #\"10 D\",\"1 M\"\n barSizeSetting=interval, #\"1 day\", \"1 min\"\n whatToShow='TRADES',\n useRTH=True,\n formatDate=1)\n \n df=util.df(bars)\n cours_open_ind=df[\"open\"]\n cours_close_ind=df[\"close\"]\n 
cours_high_ind=df[\"high\"]\n cours_low_ind=df[\"low\"]\n cours_volume_ind=df[\"volume\"]\n #Volume\n \n if len(cours_close_ind)!=len(cours_close):\n print(\"cours index is different from cours length\")\n \n myIB.disconnect()\n else:\n all_symbols=symbols+[index_symbol]\n cours=vbt.YFData.fetch(all_symbols, period=period,missing_index='drop',**kwargs)\n cours_action=cours.select(symbols)\n cours_open =cours_action.get('Open')\n cours_high=cours_action.get('High')\n cours_low=cours_action.get('Low')\n cours_close=cours_action.get('Close')\n cours_volume=cours_action.get('Volume')\n print(\"number of days retrieved: \" + str(np.shape(cours_close)[0]))\n \n cours_index=cours.select(index_symbol)\n cours_open_ind =cours_index.get('Open')\n cours_high_ind=cours_index.get('High')\n cours_low_ind=cours_index.get('Low')\n cours_close_ind=cours_index.get('Close')\n cours_volume_ind=cours_index.get('Volume')\n\n debug=False\n if debug:\n for symbol in all_symbols:\n data=vbt.YFData.fetch(symbol, period=period,**kwargs)\n \n #knowing what we drop\n close_debug=data.get(\"Close\")\n for ii in range(len(close_debug)):\n if math.isnan(close_debug.values[ii]):\n print(symbol)\n print(\"dropping at least \" + str(close_debug.index[ii]))\n \n return cours_high, cours_low, cours_close, cours_open, cours_volume, \\\n cours_high_ind, cours_low_ind, cours_close_ind, cours_open_ind,\\\n cours_volume_ind\n \n except Exception as msg:\n print(msg)\n print(\"exception in \" + __name__)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n print(msg) \n\ndef exchange_to_symbol(action):\n if action.stock_ex.ib_ticker==\"SBF\":\n return \"^FCHI\"\n elif action.stock_ex.ib_ticker==\"IBIS\":\n return \"^GDAXI\"\n elif action.stock_ex.ib_ticker==\"NASDAQ\":\n return \"^IXIC\"\n elif action.stock_ex.ib_ticker==\"BVME.ETF\":\n return \"^IXIC\" #it is only ETF anyhow\n\ndef get_exchange_actions(exchange):\n cat=ActionCategory.objects.get(short=\"ACT\")\n stockEx=StockEx.objects.get(name=exchange)\n \n c1 = Q(category=cat)\n c2 = Q(stock_ex=stockEx)\n \n actions=Action.objects.filter(c1 & c2)\n return [ob.symbol for ob in actions]\n \ndef retrieve_ib_pf():\n myIB=MyIB()\n pf=[]\n pf_short=[]\n \n for pos in myIB.ib.positions():\n contract=pos.contract\n action=Action.objects.get(ib_ticker=contract.localSymbol)\n \n if pos.position>0:\n pf.append(action.symbol)\n else:\n pf_short.append(action.symbol)\n\n myIB.disconnect()\n return pf, pf_short\n\n#for SL check\ndef get_last_price(symbol,**kwargs):\n try:\n if kwargs.get(\"index\",False):\n action=Index.objects.get(symbol=symbol)\n else:\n action=Action.objects.get(symbol=symbol) \n\n if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in [\"BVME.ETF\"]:\n myIB=MyIB()\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n cours_pres=myIB.get_last_price(contract)\n myIB.disconnect()\n else: #YF\n cours=vbt.YFData.fetch([symbol], period=\"2d\")\n cours_close=cours.get(\"Close\")\n cours_pres=cours_close[symbol].iloc[-1]\n \n return cours_pres\n except Exception as msg:\n print(symbol)\n print(\"exception in \" + __name__)\n print(msg)\n\ndef get_ratio(symbol,**kwargs):\n try:\n if kwargs.get(\"index\",False):\n action=Index.objects.get(symbol=symbol)\n else:\n action=Action.objects.get(symbol=symbol)\n \n if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in [\"BVME.ETF\"]:\n myIB=MyIB()\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n 
cours_pres=myIB.get_last_price(contract)\n cours_ref, cours_open=myIB.get_past_closing_price(contract)\n \n if kwargs.get(\"opening\",False):\n cours_pres=cours_open\n \n myIB.disconnect()\n else: #YF\n cours=vbt.YFData.fetch([symbol], period=\"2d\")\n cours_close=cours.get(\"Close\")\n\n cours_ref=cours_close[symbol].iloc[0]\n \n if kwargs.get(\"opening\",False):\n cours_open=cours.get(\"Open\")\n cours_pres=cours_open[symbol].iloc[-1]\n else:\n cours_pres=cours_close[symbol].iloc[-1]\n\n return rel_dif(cours_pres,\n cours_ref\n )*100\n except Exception as msg:\n print(symbol)\n print(\"exception in \" + __name__)\n print(msg)\n\nclass MyIB():\n def __init__(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self.ib = IB()\n self.ib.connect(host=IB_LOCALHOST, port=IB_PORT, clientId=1)\n \n def cash_balance(self):\n try:\n for v in self.ib.accountSummary():\n if v.tag == 'CashBalance':\n return float(v.value)\n except:\n return 0\n \n def test(self,symbol):\n action=Action.objects.get(symbol=symbol)\n contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)\n print(self.ib.qualifyContracts(contract)) \n \n def retrieve(self,contract,period):\n \n bars = self.ib.reqHistoricalData(\n contract,\n endDateTime='',\n durationStr=period, #\"10 D\",\"1 M\"\n barSizeSetting='1 hour', #\"1 day\", \"1 min\"\n whatToShow='TRADES',\n useRTH=True,\n formatDate=1)\n\n return util.df(bars)\n \n def get_last_price(self,contract):\n m_data = self.ib.reqMktData(contract)\n while m_data.last != m_data.last: #Wait until data is in. \n self.ib.sleep(0.01)\n self.ib.cancelMktData(contract)\n return m_data.last\n \n def get_past_closing_price(self,contract):\n period=\"2 D\"\n bars = self.ib.reqHistoricalData(\n contract,\n endDateTime='',\n durationStr=period, #\"10 D\",\"1 M\"\n barSizeSetting='1 day', #\"1 day\", \"1 min\"\n whatToShow='TRADES',\n useRTH=True,\n formatDate=1)\n df=util.df(bars)\n return df.iloc[0][\"close\"], df.iloc[-1][\"open\"]\n \n def place(self,buy,ticker,currency,exchange,**kwargs): #quantity in euros\n if ticker==\"AAA\":\n print(\"ticker not found\")\n return \"\", 0\n else:\n contract = Stock(ticker, exchange, currency)\n self.ib.qualifyContracts(contract)\n \n if buy:\n order_size=kwargs.get(\"order_size\",0)\n last_price=self.get_last_price(contract)\n quantity=math.floor(order_size/last_price)\n order = MarketOrder('BUY', quantity)\n else:\n quantity=kwargs.get(\"quantity\",0)\n order = MarketOrder('SELL', quantity)\n trade = self.ib.placeOrder(contract, order)\n \n self.ib.sleep(1.0)\n if trade.orderStatus.status == 'Filled':\n fill = trade.fills[-1]\n txt=f'{fill.time} - {fill.execution.side} {fill.contract.symbol} {fill.execution.shares} @ {fill.execution.avgPrice}'\n price=fill.execution.avgPrice \n return txt, price, quantity\n \n def exit_order(self,symbol,strategy, exchange,short,**kwargs): \n #type check necessary for indexes\n try:\n pf= get_pf(strategy, exchange,short)\n ocap=get_order_capital(strategy, exchange,short)\n \n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol) #actually should be more complex\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n else:\n action=Action.objects.get(symbol=symbol)\n \n if symbol in pf.retrieve():\n c1 = Q(action=action)\n c2 = Q(active=True)\n \n order=Order.objects.filter(c1 & c2)\n\n #profit\n if len(order)>0:\n txt, order[0].exiting_price, quantity= self.place(False,\n action.ib_ticker(),\n action.currency.symbol, \n 
action.stock_ex.ib_ticker,\n quantity=order[0].quantity)\n order[0].exiting_date=timezone.now()\n \n if order[0].entering_price is not None: \n order[0].profit=order[0].exiting_price-order[0].entering_price\n order[0].profit_percent=(order[0].exiting_price/order[0].entering_price-1)*100\n \n order[0].active=False\n order[0].save()\n \n ocap.capital+=1\n ocap.save()\n pf.remove(symbol)\n pf.save()\n \n return True\n else:\n print(\"order not found \" + symbol)\n return False\n return False\n \n except Exception as msg:\n print(\"exception in exit\")\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n\n def entry_order(self,symbol,strategy, exchange,short,**kwargs): \n try:\n #type check necessary for indexes\n pf= get_pf(strategy, exchange,short)\n order_size=5000\n ocap=get_order_capital(strategy, exchange,short)\n #accountSummary\n \n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol)\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n else:\n action=Action.objects.get(symbol=symbol)\n \n excluded=Excluded.objects.get(name=\"all\") #list of actions completely excluded from entries\n\n if (symbol not in pf.retrieve() and \n symbol not in excluded.retrieve() and \n ocap.capital>0 and\n order_size<=self.cash_balance()):\n \n order=Order(action=action, pf=pf)\n txt, order.entering_price, order.quantity= self.place(True,\n action.ib_ticker(),\n action.currency.symbol,\n action.stock_ex.ib_ticker,\n order_size=order_size)\n \n if kwargs.get(\"sl\",False):\n sl=kwargs.get(\"sl\")\n order.sl_threshold=order.entering_price*(1-sl)\n \n order.save()\n pf.append(symbol)\n pf.save()\n ocap.capital-=1\n ocap.save()\n \n return True\n return False\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n\n def disconnect(self):\n self.ib.disconnect()\n\ndef check_hold_duration(symbol,strategy, exchange,short,**kwargs): \n #type check necessary for indexes\n try:\n pf= get_pf(strategy, exchange,short)\n \n #accountSummary\n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol)\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n else:\n action=Action.objects.get(symbol=symbol)\n\n if symbol in pf.retrieve():\n c1 = Q(action=action)\n c2 = Q(active=True)\n order=Order.objects.filter(c1 & c2)\n if len(order)>0:\n delta=timezone.now()-order[0].entering_date\n return delta.days\n \n return 0\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(msg)\n return 0\n\ndef entry_order(symbol,strategy, exchange,short,**kwargs): \n if PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]:\n myIB=MyIB()\n return myIB.entry_order(symbol,strategy, exchange,short,**kwargs), True\n else: \n return entry_order_test(symbol,strategy, exchange,short,**kwargs), False\n \ndef exit_order(symbol,strategy, exchange,short,**kwargs): \n if PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]:\n myIB=MyIB()\n return myIB.exit_order(symbol,strategy, exchange,short,**kwargs), True\n else: \n return exit_order_test(symbol,strategy, exchange,short,**kwargs), False\n\ndef entry_order_test(symbol,strategy, exchange,short,**kwargs): \n try:\n #type check necessary for indexes\n pf= get_pf(strategy, exchange,short)\n ocap=get_order_capital(strategy, exchange,short)\n \n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol)\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n 
else:\n action=Action.objects.get(symbol=symbol)\n symbol2=action.symbol\n \n excluded=Excluded.objects.get(name=\"all\") #list of actions completely excluded from entries\n \n if (symbol2 not in pf.retrieve() and \n symbol2 not in excluded.retrieve() and\n ocap.capital>0):\n order=Order(action=action, pf=pf)\n order.entering_price=1.0\n \n order.save()\n #post telegram\n pf.append(symbol2)\n \n pf.save()\n ocap.capital-=1 #also for short\n ocap.save()\n \n return True\n return False\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n \ndef exit_order_test(symbol,strategy, exchange,short,**kwargs): \n try:\n pf= get_pf(strategy, exchange,short)\n ocap=get_order_capital(strategy, exchange,short)\n \n if kwargs.get(\"index\",False):\n index=Index.objects.get(symbol=symbol) #actually should be more complex\n if short:\n action=index.etf_short\n else:\n action=index.etf_long\n else:\n action=Action.objects.get(symbol=symbol)\n symbol2=action.symbol\n \n if symbol2 in pf.retrieve():\n c1 = Q(action=action)\n c2 = Q(active=True)\n \n order=Order.objects.filter(c1 & c2)\n #post telegram\n #price\n #profit\n if len(order)>0:\n order[0].exiting_date=timezone.now()\n order[0].active=False\n order[0].save()\n\n ocap.capital+=1 #also for short\n ocap.save()\n pf.remove(symbol2)\n pf.save()\n\n return True\n return False\n \n except Exception as msg:\n print(\"exception in \" + __name__)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n\nclass Currency(models.Model):\n name=models.CharField(max_length=100, blank=False)\n symbol=models.CharField(max_length=100, blank=False,default=\"A\")\n \n def __str__(self):\n return self.name\n \nclass Fees(models.Model):\n name=models.CharField(max_length=100, blank=False, default=\"fee\")\n fixed=models.DecimalField(max_digits=100, decimal_places=5)\n percent=models.DecimalField(max_digits=100, decimal_places=5)\n \n def __str__(self):\n return self.name \n \nclass StockEx(models.Model):\n name=models.CharField(max_length=100, blank=False)\n fees=models.ForeignKey('Fees',on_delete=models.CASCADE)\n ib_ticker=models.CharField(max_length=15, blank=True,default=\"AAA\")\n opening_time=models.TimeField(default=\"09:00:00\")\n closing_time=models.TimeField(default=\"17:00:00\")\n \n def __str__(self):\n return self.name \n\n\nclass Strategy(models.Model):\n name=models.CharField(max_length=100, blank=False)\n \n def __str__(self):\n return self.name\n\n### Index is like action, but it had to be separated, as an index cannot be bought directly\nclass Index(models.Model):\n symbol=models.CharField(max_length=15, blank=False, primary_key=True)\n ib_ticker=models.CharField(max_length=15, blank=True,default=\"AAA\")\n name=models.CharField(max_length=100, blank=False)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)\n currency=models.ForeignKey('Currency',on_delete=models.CASCADE)\n etf_long=models.ForeignKey('Action',on_delete=models.PROTECT,default=0,related_name='etf_long')\n etf_short=models.ForeignKey('Action',on_delete=models.PROTECT, default=0,related_name='etf_short')\n \n class Meta:\n ordering = [\"name\"]\n\n def ib_ticker(self):\n return self.ib_ticker\n \n def __str__(self):\n return self.name \n\nclass Action(models.Model):\n symbol=models.CharField(max_length=15, blank=False, primary_key=True)\n ib_ticker=models.CharField(max_length=15, blank=True,default=\"AAA\")\n 
name=models.CharField(max_length=100, blank=False)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)\n currency=models.ForeignKey('Currency',on_delete=models.CASCADE)\n category=models.ForeignKey('ActionCategory',on_delete=models.CASCADE,blank=True)\n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)\n \n class Meta:\n ordering = [\"name\"]\n \n def ib_ticker(self):\n t=self.symbol.split(\".\")\n return t[0] \n \n def __str__(self):\n return self.name\n\nclass Order(models.Model):\n action=models.ForeignKey('Action',on_delete=models.CASCADE)\n pf=models.ForeignKey('PF',on_delete=models.SET_NULL,blank=True,null=True)\n active=models.BooleanField(blank=False,default=True)\n entering_date=models.DateTimeField(null=False, blank=False, auto_now_add=True)#default=timezone.now())\n exiting_date=models.DateTimeField(null=True, blank=True)\n entering_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n exiting_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n sl_threshold=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n profit=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n profit_percent=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n quantity=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n\n def __str__(self):\n return self.action.name + \" \"+ str(self.entering_date)\n\ndef pf_retrieve_all(**kwargs):\n arr=[]\n \n for pf in PF.objects.filter(short=kwargs.get(\"short\",False)):\n cat=ActionCategory.objects.get(short=\"ACT\")\n c1 = Q(category=cat)\n if kwargs.get(\"opening\")==\"9h\":\n stockEx1=StockEx.objects.filter(name=\"Paris\")\n stockEx2=StockEx.objects.filter(name=\"XETRA\")\n c2 = Q(stock_ex=stockEx1[0])\n c3 = Q(stock_ex=stockEx2[0])\n actions=pf.actions.filter(c1 & (c2|c3))\n elif kwargs.get(\"opening\")==\"15h\":\n stockEx1=StockEx.objects.filter(name=\"Nasdaq\")\n c2 = Q(stock_ex=stockEx1[0])\n actions=pf.actions.filter(c1 & c2)\n else:\n actions=pf.actions.filter(c1)\n \n for action in actions:\n if not action.symbol in arr:\n arr.append(action.symbol)\n return arr\n\n### Portfolio for a given strategy (used as name presently)\nclass PF(models.Model):\n # can be replaced with ib.positions() or ib.portfolio()\n name=models.CharField(max_length=100, blank=False)\n actions=models.ManyToManyField(Action,blank=True)\n short=models.BooleanField(blank=False,default=False)\n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)\n \n def __str__(self):\n return self.name\n \n def retrieve(self):\n arr=[]\n for action in self.actions.all():\n arr.append(action.symbol)\n return arr\n\n def remove(self,symbol):\n a = Action.objects.get(symbol=symbol)\n \n try:\n self.actions.remove(a)\n self.save()\n except Exception as msg:\n print(\"exception in remove_symbol\")\n print(symbol)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n\n def append(self,symbol):\n try:\n a = Action.objects.get(symbol=symbol)\n self.actions.add(a)\n self.save()\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(symbol)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass \n\ndef get_pf(strategy, exchange,short):\n s=Strategy.objects.get(name=strategy)\n 
e=StockEx.objects.get(name=exchange)\n\n c1 = Q(stock_ex=e)\n c2 = Q(strategy=s) \n c3 = Q(short=short)\n\n return PF.objects.get(c1 & c2 & c3)\n\n\n### To distinguish between ETF, actions, indexes...\nclass ActionCategory(models.Model):\n short=models.CharField(max_length=15, blank=False, default=\"AAA\", primary_key=True)\n name=models.CharField(max_length=100, blank=False)\n\n def __str__(self):\n return self.name \n\n###To define the capital assigned to one strategy.\n###Not used presently \nclass Capital(models.Model):\n #self.ib.accountSummary()\n capital=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n name=models.CharField(max_length=100, blank=False,default=\"\")\n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)\n \n def __str__(self):\n return self.name \n\ndef get_capital(strategy, exchange,short):\n s=Strategy.objects.get(name=strategy)\n e=StockEx.objects.get(name=exchange)\n\n c1 = Q(stock_ex=e)\n c2 = Q(strategy=s) \n c3 = Q(short=short)\n\n return Capital.objects.get(c1 & c2 & c3)\n\n###To define the number of orders assigned to one strategy\n###1 means that only one action can be owned at a time using this strategy\n\nclass OrderCapital(models.Model):\n capital=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)\n name=models.CharField(max_length=100, blank=False,default=\"\")\n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)\n \n def __str__(self):\n return self.name \n\ndef get_order_capital(strategy, exchange,short):\n s=Strategy.objects.get(name=strategy)\n e=StockEx.objects.get(name=exchange)\n\n c1 = Q(stock_ex=e)\n c2 = Q(strategy=s) \n\n return OrderCapital.objects.get(c1 & c2)\n\n###For strategy using two time frame, in the slow one (10 days) candidates are defined\n###And on daily basis the other strategy decides which of the candidate is really bought or sold\n\nclass Candidates(models.Model):\n name=models.CharField(max_length=100, blank=False)\n actions=models.ManyToManyField(Action,blank=True) \n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=1)\n stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)\n \n def reset(self):\n for a in self.actions.all():\n self.actions.remove(a)\n self.save()\n \n def append(self,symbol): #so we can name as for list\n a = Action.objects.get(symbol=symbol)\n self.actions.add(a)\n self.save()\n \n def retrieve(self):\n arr=[]\n for action in self.actions.all():\n arr.append(action.symbol)\n return arr\n \n def __str__(self):\n return self.name \n\ndef get_candidates(strategy, exchange):\n s=Strategy.objects.get(name=strategy)\n e=StockEx.objects.get(name=exchange)\n\n c1 = Q(stock_ex=e)\n c2 = Q(strategy=s) \n\n return Candidates.objects.get(c1 & c2)\n \n### List of actions provisory excluded for a strategy as it risks to perform bad\n \nclass Excluded(models.Model):\n name=models.CharField(max_length=100, blank=False)\n actions=models.ManyToManyField(Action,blank=True) \n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)\n \n def reset(self):\n for a in self.actions.all():\n self.actions.remove(a)\n self.save()\n \n def append(self,symbol):\n a = Action.objects.get(symbol=symbol)\n self.actions.add(a)\n self.save()\n \n def remove(self,symbol):\n a = 
Action.objects.get(symbol=symbol)\n \n try:\n self.actions.remove(a)\n self.save()\n except Exception as msg:\n print(\"exception in \" + __name__)\n print(symbol)\n print(msg)\n _, e_, exc_tb = sys.exc_info()\n print(\"line \" + str(exc_tb.tb_lineno))\n pass\n \n def retrieve(self):\n arr=[]\n for action in self.actions.all():\n arr.append(action.symbol)\n return arr\n \n def __str__(self):\n return self.name \n \n### Define a list of actions and indexes that can be traded using the defined strategy\nclass StratCandidates(models.Model):\n name=models.CharField(max_length=100, blank=False)\n actions=models.ManyToManyField(Action,blank=True) \n indexes=models.ManyToManyField(Index,blank=True) \n strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)\n \n def retrieve(self):\n arr=[]\n for action in self.actions.all():\n arr.append(action.symbol)\n return arr \n \n def __str__(self):\n return self.name " ]
[ [ "pandas.DataFrame", "numpy.shape" ] ]
johnson7788/lit
[ "3eb824b01e0f72a5486124b16056bf912465debc" ]
[ "lit_nlp/examples/sst_pytorch_demo.py" ]
[ "# Lint as: python3\nr\"\"\"Code example for a custom model, using PyTorch.\n\nThis demo shows how to use a custom model with LIT, in just a few lines of code.\nWe'll use a transformers model, with a minimal amount of code to implement the\nLIT API. Compared to models/glue_models.py, this has fewer features, but the\ncode is more readable.\nThis demo is similar in functionality to simple_tf2_demo.py, but uses PyTorch\ninstead of TensorFlow 2.\nThe transformers library can load weights from either,\nso you can use any saved model compatible with the underlying model class\n(AutoModelForSequenceClassification). To train something for this demo, you can:\n- Use quickstart_sst_demo.py, and set --model_path to somewhere durable\n- Or: Use tools/glue_trainer.py\n- Or: Use any fine-tuning code that works with transformers, such as\nhttps://github.com/huggingface/transformers#quick-tour-of-the-fine-tuningusage-scripts\nTo run locally:\n python -m lit_nlp.examples.simple_pytorch_demo \\\n --port=5432 --model_path=/path/to/saved/model\nThen navigate to localhost:5432 to access the demo UI.\nNOTE: this demo still uses TensorFlow Datasets (which depends on TensorFlow) to\nload the data. However, the output of glue.SST2Data is just NumPy arrays and\nplain Python data, and you can easily replace this with a different library or\ndirectly loading from CSV.\n\"\"\"\nimport re\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom lit_nlp import dev_server\nfrom lit_nlp import server_flags\nfrom lit_nlp.api import model as lit_model\nfrom lit_nlp.api import types as lit_types\nfrom lit_nlp.examples.datasets import glue\nfrom lit_nlp.lib import utils\nimport torch\nimport transformers\n\n# NOTE: additional flags defined in server_flags.py\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n \"model_path\", None,\n \"Path to trained model, in standard transformers format, e.g. 
as \"\n \"saved by model.save_pretrained() and tokenizer.save_pretrained()\")\n\n\ndef _from_pretrained(cls, *args, **kw):\n \"\"\"Load a transformers model in PyTorch, with fallback to TF2/Keras weights.\"\"\"\n try:\n return cls.from_pretrained(*args, **kw)\n except OSError as e:\n logging.warning(\"Caught OSError loading model: %s\", e)\n logging.warning(\n \"Re-trying to convert from TensorFlow checkpoint (from_tf=True)\")\n return cls.from_pretrained(*args, from_tf=True, **kw)\n\n\nclass SimpleSentimentModel(lit_model.Model):\n \"\"\"Simple sentiment analysis model.\"\"\"\n\n LABELS = [\"0\", \"1\"] # negative, positive\n compute_grads: bool = True # if True, compute and return gradients.\n\n def __init__(self, model_name_or_path):\n self.tokenizer = transformers.AutoTokenizer.from_pretrained(\n model_name_or_path)\n model_config = transformers.AutoConfig.from_pretrained(\n model_name_or_path,\n num_labels=2,\n output_hidden_states=True,\n output_attentions=True,\n )\n # This is a just a regular PyTorch model.\n self.model = _from_pretrained(\n transformers.AutoModelForSequenceClassification,\n model_name_or_path,\n config=model_config)\n self.model.eval()\n\n ##\n # LIT API implementation\n def max_minibatch_size(self):\n # This tells lit_model.Model.predict() how to batch inputs to\n # predict_minibatch().\n # Alternately, you can just override predict() and handle batching yourself.\n return 32\n\n def predict_minibatch(self, inputs):\n\n # Preprocess to ids and masks, and make the input batch.\n encoded_input = self.tokenizer.batch_encode_plus(\n [ex[\"sentence\"] for ex in inputs],\n return_tensors=\"pt\",\n add_special_tokens=True,\n max_length=128,\n padding=\"longest\",\n truncation=\"longest_first\")\n\n # Check and send to cuda (GPU) if available\n if torch.cuda.is_available():\n self.model.cuda()\n for tensor in encoded_input:\n encoded_input[tensor] = encoded_input[tensor].cuda()\n\n # Run a forward pass.\n with torch.set_grad_enabled(self.compute_grads):\n out: transformers.modeling_outputs.SequenceClassifierOutput = \\\n self.model(**encoded_input)\n\n # Post-process outputs.\n batched_outputs = {\n \"probas\": torch.nn.functional.softmax(out.logits, dim=-1),\n \"input_ids\": encoded_input[\"input_ids\"],\n \"ntok\": torch.sum(encoded_input[\"attention_mask\"], dim=1),\n \"cls_emb\": out.hidden_states[-1][:, 0], # last layer, first token\n }\n\n # Add attention layers to batched_outputs\n assert len(out.attentions) == self.model.config.num_hidden_layers\n for i, layer_attention in enumerate(out.attentions):\n batched_outputs[f\"layer_{i}/attention\"] = layer_attention\n\n # Request gradients after the forward pass.\n # Note: hidden_states[0] includes position and segment encodings, as well as\n # subword embeddings.\n if self.compute_grads:\n # <torch.float32>[batch_size, num_tokens, emb_dim]\n scalar_pred_for_gradients = torch.max(\n batched_outputs[\"probas\"], dim=1, keepdim=False, out=None)[0]\n batched_outputs[\"input_emb_grad\"] = torch.autograd.grad(\n scalar_pred_for_gradients,\n out.hidden_states[0],\n grad_outputs=torch.ones_like(scalar_pred_for_gradients))[0]\n\n # Post-process outputs.\n # Return as NumPy for further processing.\n detached_outputs = {\n k: v.cpu().detach().numpy() for k, v in batched_outputs.items()}\n\n # Unbatch outputs so we get one record per input example.\n for output in utils.unbatch_preds(detached_outputs):\n ntok = output.pop(\"ntok\")\n output[\"tokens\"] = self.tokenizer.convert_ids_to_tokens(\n 
output.pop(\"input_ids\")[:ntok])\n\n # set token gradients\n if self.compute_grads:\n output[\"token_grad_sentence\"] = output[\"input_emb_grad\"][:ntok]\n\n # Process attention.\n for key in output:\n if not re.match(r\"layer_(\\d+)/attention\", key):\n continue\n # Select only real tokens, since most of this matrix is padding.\n # <float32>[num_heads, max_seq_length, max_seq_length]\n # -> <float32>[num_heads, num_tokens, num_tokens]\n output[key] = output[key][:, :ntok, :ntok].transpose((0, 2, 1))\n # Make a copy of this array to avoid memory leaks, since NumPy otherwise\n # keeps a pointer around that prevents the source array from being GCed.\n output[key] = output[key].copy()\n yield output\n\n def input_spec(self) -> lit_types.Spec:\n return {\n \"sentence\": lit_types.TextSegment(),\n \"label\": lit_types.CategoryLabel(vocab=self.LABELS, required=False)\n }\n\n def output_spec(self) -> lit_types.Spec:\n ret = {\n \"tokens\": lit_types.Tokens(),\n \"probas\": lit_types.MulticlassPreds(parent=\"label\", vocab=self.LABELS),\n \"cls_emb\": lit_types.Embeddings()\n }\n # Gradients, if requested.\n if self.compute_grads:\n ret[\"token_grad_sentence\"] = lit_types.TokenGradients(\n align=\"tokens\")\n\n # Attention heads, one field for each layer.\n for i in range(self.model.config.num_hidden_layers):\n ret[f\"layer_{i}/attention\"] = lit_types.AttentionHeads(\n align_in=\"tokens\", align_out=\"tokens\")\n return ret\n\n\ndef main(_):\n # Normally path is a directory; if it's an archive file, download and\n # extract to the transformers cache.\n model_path = FLAGS.model_path\n if model_path.endswith(\".tar.gz\"):\n model_path = transformers.file_utils.cached_path(\n model_path, extract_compressed_file=True)\n\n # Load the model we defined above.\n models = {\"sst\": SimpleSentimentModel(model_path)}\n # Load SST-2 validation set from TFDS.\n datasets = {\"sst_dev\": glue.SST2Data(\"validation\")}\n\n # Start the LIT server. See server_flags.py for server options.\n lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())\n lit_demo.serve()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "torch.sum", "torch.ones_like", "torch.set_grad_enabled", "torch.nn.functional.softmax", "torch.cuda.is_available", "torch.max" ] ]
dataength/automating-your-data-pipeline-with-apache-airflow
[ "90a1351de6de78c0f0a6fb2e778e2ba3b7c78f5e" ]
[ "machine-learning-pipeline/airflow/dags/train_simple_model.py" ]
[ "import pickle\n\nfrom airflow import DAG\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils import timezone\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndefault_args = {\n 'owner': 'ODDS',\n}\ndag = DAG(\n 'train_simple_model',\n schedule_interval='*/15 * * * *',\n default_args=default_args,\n start_date=timezone.datetime(2020, 8, 1),\n catchup=False\n)\n\nstart = DummyOperator(task_id='start', dag=dag)\n\n\ndef train_func():\n clf = RandomForestClassifier(random_state=0)\n X = [[ 1, 2, 3],\n [11, 12, 13]]\n y = [0, 1]\n clf.fit(X, y)\n\n MODEL_PATH = '/Users/zkan/Projects/dataength/' \\\n 'automating-your-data-pipeline-with-apache-airflow/' \\\n 'machine-learning-pipeline/airflow/dags'\n\n with open(f'{MODEL_PATH}/models/clf.model', 'wb') as outfile:\n pickle.dump(clf, outfile)\n\n\ntrain = PythonOperator(\n task_id='train',\n python_callable=train_func,\n dag=dag,\n)\n\nend = DummyOperator(task_id='end', dag=dag)\n\nstart >> train >> end\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier" ] ]
toddlerya/AnalyzeNPC
[ "5d16f994ec34300a3050463aad08ad3a1ec1eaba" ]
[ "nighteen_cpc.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author: toddler\n\nimport jieba\nimport re\nimport os\nfrom collections import Counter\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\n\ndef cut_analyze(input_file):\n \"\"\"\n :param input_file: 输入带切词分析的文本路径\n :return: (list1, list2) list1切词处理后的列表结果, list2输出切词处理排序后的词频结果, 列表-元祖嵌套结果\n \"\"\"\n cpc_dict_path = u'user_dict/cpc_dictionary.txt'\n stop_words_path = u'user_dict/stopword.txt'\n\n with open(input_file) as f:\n content = f.read()\n\n with open(stop_words_path) as sf:\n st_content = sf.readlines()\n\n jieba.load_userdict(cpc_dict_path) # 加载针对全国人民代表大会的分词词典\n\n stop_words = [line.strip().decode('utf-8') for line in st_content] # 将读取的数据都转为unicode处理\n\n seg_list = jieba.cut(content, cut_all=False) # 精确模式\n\n filter_seg_list = list()\n\n for seg in seg_list:\n goal_word = ''.join(re.findall(u'[\\u4e00-\\u9fa5]+', seg)).strip() # 过滤所有非中文字符内容\n if len(goal_word) != 0 and not stop_words.__contains__(goal_word): # 过滤分词结果中的停词内容\n # filter_seg_list.append(goal_word.encode('utf-8')) # 将unicode的文本转为utf-8保存到列表以备后续处理\n filter_seg_list.append(goal_word)\n\n seg_counter_all = Counter(filter_seg_list).most_common() # 对切词结果按照词频排序\n\n # for item in seg_counter_all:\n # print \"词语: {0} - 频数: {1}\".format(item[0].encode('utf-8'), item[1])\n\n return filter_seg_list, seg_counter_all\n\n\ndef main():\n input_file_path = u'input_file/nighteen-cpc.txt'\n cut_data, sort_data = cut_analyze(input_file=input_file_path)\n font = os.path.abspath('assets/msyh.ttf')\n wc = WordCloud(collocations=False, font_path=font, width=3600, height=3600, margin=2)\n wc.generate_from_frequencies(dict(sort_data))\n plt.figure()\n plt.imshow(wc)\n plt.axis('off')\n plt.show()\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ] ]
ThomasHoppe/concept_formation
[ "2468fea78ba46804bf44228519eb33ebc5780d31" ]
[ "concept_formation/tests/benchmark_cobweb.py" ]
[ "from random import randint\nfrom timeit import timeit\n\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as mpatches\n\n\ndef generate_dataset(n_inst, n_attr, n_val):\n instances = []\n for i in range(n_inst):\n i = {}\n for j in range(n_attr):\n i[str(j)] = randint(1, n_val)\n instances.append(i)\n return instances\n\n\ndef time(n_inst, n_attr, n_val):\n return timeit('tree.fit(x)',\n setup=('from __main__ import generate_dataset; '\n 'from concept_formation.cobweb import CobwebTree; '\n 'tree = CobwebTree(); '\n 'x = generate_dataset(%i, %i, %i)' % (n_inst, n_attr,\n n_val)),\n number=1)\n\n\nif __name__ == \"__main__\":\n # 5 attributes\n sizes = [10, 30, 60, 120, 180, 220]\n times = [time(i, 5, 5) for i in sizes]\n plt.plot(sizes, times, 'ro')\n plt.plot(sizes, times, 'r-')\n\n # 10 attributes\n times = [time(i, 10, 5) for i in sizes]\n plt.plot(sizes, times, 'bo')\n plt.plot(sizes, times, 'b-')\n\n # 20 attributes\n times = [time(i, 20, 5) for i in sizes]\n plt.plot(sizes, times, 'go')\n plt.plot(sizes, times, 'g-')\n\n red_patch = mpatches.Patch(color='red', label='# attr=5')\n blue_patch = mpatches.Patch(color='blue', label='# attr=10')\n green_patch = mpatches.Patch(color='green', label='# attr=20')\n plt.legend(handles=[red_patch, blue_patch, green_patch], loc=2)\n\n plt.xlabel('Number of training instances (5 possible values / attr)')\n plt.ylabel('Runtime in Seconds')\n plt.show()\n" ]
[ [ "matplotlib.patches.Patch", "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
ljw23/ConvLab-2
[ "13d48ea0e441701bd66100689b6c25b561f15525", "13d48ea0e441701bd66100689b6c25b561f15525" ]
[ "convlab2/policy/larl/multiwoz/latent_dialog/enc2dec/decoders.py", "convlab2/e2e/rnn_rollout/engines/selection_engine.py" ]
[ "import torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport numpy as np\nfrom convlab2.policy.larl.multiwoz.latent_dialog.enc2dec.base_modules import BaseRNN\nfrom convlab2.policy.larl.multiwoz.latent_dialog.utils import cast_type, LONG, FLOAT\nfrom convlab2.policy.larl.multiwoz.latent_dialog.corpora import DECODING_MASKED_TOKENS, EOS\n\n\nTEACH_FORCE = 'teacher_forcing'\nTEACH_GEN = 'teacher_gen'\nGEN = 'gen'\nGEN_VALID = 'gen_valid'\n\n\nclass Attention(nn.Module):\n def __init__(self, dec_cell_size, ctx_cell_size, attn_mode, project):\n super(Attention, self).__init__()\n self.dec_cell_size = dec_cell_size\n self.ctx_cell_size = ctx_cell_size\n self.attn_mode = attn_mode\n if project:\n self.linear_out = nn.Linear(\n dec_cell_size+ctx_cell_size, dec_cell_size)\n else:\n self.linear_out = None\n\n if attn_mode == 'general':\n self.dec_w = nn.Linear(dec_cell_size, ctx_cell_size)\n elif attn_mode == 'cat':\n self.dec_w = nn.Linear(dec_cell_size, dec_cell_size)\n self.attn_w = nn.Linear(ctx_cell_size, dec_cell_size)\n self.query_w = nn.Linear(dec_cell_size, 1)\n\n def forward(self, output, context):\n # output: (batch_size, output_seq_len, dec_cell_size)\n # context: (batch_size, max_ctx_len, ctx_cell_size)\n batch_size = output.size(0)\n max_ctx_len = context.size(1)\n\n if self.attn_mode == 'dot':\n # (batch_size, output_seq_len, max_ctx_len)\n attn = th.bmm(output, context.transpose(1, 2))\n elif self.attn_mode == 'general':\n # (batch_size, output_seq_len, ctx_cell_size)\n mapped_output = self.dec_w(output)\n # (batch_size, output_seq_len, max_ctx_len)\n attn = th.bmm(mapped_output, context.transpose(1, 2))\n elif self.attn_mode == 'cat':\n # (batch_size, output_seq_len, dec_cell_size)\n mapped_output = self.dec_w(output)\n # (batch_size, max_ctx_len, dec_cell_size)\n mapped_attn = self.attn_w(context)\n # (batch_size, output_seq_len, max_ctx_len, dec_cell_size)\n tiled_output = mapped_output.unsqueeze(\n 2).repeat(1, 1, max_ctx_len, 1)\n # (batch_size, 1, max_ctx_len, dec_cell_size)\n tiled_attn = mapped_attn.unsqueeze(1)\n # (batch_size, output_seq_len, max_ctx_len, dec_cell_size)\n fc1 = F.tanh(tiled_output+tiled_attn)\n # (batch_size, otuput_seq_len, max_ctx_len)\n attn = self.query_w(fc1).squeeze(-1)\n else:\n raise ValueError('Unknown attention mode')\n\n # TODO mask\n # if self.mask is not None:\n\n # (batch_size, output_seq_len, max_ctx_len)\n attn = F.softmax(attn.view(-1, max_ctx_len),\n dim=1).view(batch_size, -1, max_ctx_len)\n # (batch_size, output_seq_len, ctx_cell_size)\n mix = th.bmm(attn, context)\n # (batch_size, output_seq_len, dec_cell_size+ctx_cell_size)\n combined = th.cat((mix, output), dim=2)\n if self.linear_out is None:\n return combined, attn\n else:\n output = F.tanh(\n self.linear_out(combined.view(-1, self.dec_cell_size+self.ctx_cell_size))).view(\n batch_size, -1, self.dec_cell_size) # (batch_size, output_seq_len, dec_cell_size)\n return output, attn\n\n\nclass DecoderRNN(BaseRNN):\n def __init__(self, input_dropout_p, rnn_cell, input_size, hidden_size, num_layers, output_dropout_p,\n bidirectional, vocab_size, use_attn, ctx_cell_size, attn_mode, sys_id, eos_id, use_gpu,\n max_dec_len, embedding=None):\n\n super(DecoderRNN, self).__init__(input_dropout_p=input_dropout_p,\n rnn_cell=rnn_cell,\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n output_dropout_p=output_dropout_p,\n bidirectional=bidirectional)\n\n # TODO embedding is None or 
not\n if embedding is None:\n self.embedding = nn.Embedding(vocab_size, input_size)\n else:\n self.embedding = embedding\n\n # share parameters between encoder and decoder\n # self.rnn = ctx_encoder.rnn\n # self.FC = nn.Linear(input_size, utt_encoder.output_size)\n\n self.use_attn = use_attn\n if self.use_attn:\n self.attention = Attention(dec_cell_size=hidden_size,\n ctx_cell_size=ctx_cell_size,\n attn_mode=attn_mode,\n project=True)\n\n self.dec_cell_size = hidden_size\n self.output_size = vocab_size\n self.project = nn.Linear(self.dec_cell_size, self.output_size)\n self.log_softmax = F.log_softmax\n\n self.sys_id = sys_id\n self.eos_id = eos_id\n self.use_gpu = use_gpu\n self.max_dec_len = max_dec_len\n\n def forward(self, batch_size, dec_inputs, dec_init_state, attn_context, mode, gen_type, beam_size, goal_hid=None):\n # dec_inputs: (batch_size, response_size-1)\n # attn_context: (batch_size, max_ctx_len, ctx_cell_size)\n # goal_hid: (batch_size, goal_nhid)\n\n ret_dict = dict()\n\n if self.use_attn:\n ret_dict[DecoderRNN.KEY_ATTN_SCORE] = list()\n\n if mode == GEN:\n dec_inputs = None\n\n if gen_type != 'beam':\n beam_size = 1\n\n if dec_inputs is not None:\n decoder_input = dec_inputs\n else:\n # prepare the BOS inputs\n with th.no_grad():\n bos_var = Variable(th.LongTensor([self.sys_id]))\n bos_var = cast_type(bos_var, LONG, self.use_gpu)\n decoder_input = bos_var.expand(\n batch_size*beam_size, 1) # (batch_size, 1)\n\n if mode == GEN and gen_type == 'beam':\n # TODO if beam search, repeat the initial states of the RNN\n pass\n else:\n decoder_hidden_state = dec_init_state\n\n # list of logprob | max_dec_len*(batch_size, 1, vocab_size)\n prob_outputs = []\n symbol_outputs = [] # list of word ids | max_dec_len*(batch_size, 1)\n # back_pointers = []\n # lengths = blabla...\n\n def decode(step, cum_sum, step_output, step_attn):\n prob_outputs.append(step_output)\n step_output_slice = step_output.squeeze(\n 1) # (batch_size, vocab_size)\n if self.use_attn:\n ret_dict[DecoderRNN.KEY_ATTN_SCORE].append(step_attn)\n\n if gen_type == 'greedy':\n _, symbols = step_output_slice.topk(1) # (batch_size, 1)\n elif gen_type == 'sample':\n # TODO FIXME\n # symbols = self.gumbel_max(step_output_slice)\n pass\n elif gen_type == 'beam':\n # TODO\n pass\n else:\n raise ValueError('Unsupported decoding mode')\n\n symbol_outputs.append(symbols)\n\n return cum_sum, symbols\n\n if mode == TEACH_FORCE:\n prob_outputs, decoder_hidden_state, attn = self.forward_step(\n input_var=decoder_input, hidden_state=decoder_hidden_state, encoder_outputs=attn_context, goal_hid=goal_hid)\n else:\n # do free running here\n cum_sum = None\n for step in range(self.max_dec_len):\n # Input:\n # decoder_input: (batch_size, 1)\n # decoder_hidden_state: tuple: (h, c)\n # attn_context: (batch_size, max_ctx_len, ctx_cell_size)\n # goal_hid: (batch_size, goal_nhid)\n # Output:\n # decoder_output: (batch_size, 1, vocab_size)\n # decoder_hidden_state: tuple: (h, c)\n # step_attn: (batch_size, 1, max_ctx_len)\n decoder_output, decoder_hidden_state, step_attn = self.forward_step(\n decoder_input, decoder_hidden_state, attn_context, goal_hid=goal_hid)\n cum_sum, symbols = decode(\n step, cum_sum, decoder_output, step_attn)\n decoder_input = symbols\n\n # (batch_size, max_dec_len, vocab_size)\n prob_outputs = th.cat(prob_outputs, dim=1)\n\n # back tracking to recover the 1-best in beam search\n # if gen_type == 'beam':\n\n ret_dict[DecoderRNN.KEY_SEQUENCE] = symbol_outputs\n\n # prob_outputs: (batch_size, max_dec_len, vocab_size)\n # 
decoder_hidden_state: tuple: (h, c)\n # ret_dict[DecoderRNN.KEY_ATTN_SCORE]: max_dec_len*(batch_size, 1, max_ctx_len)\n # ret_dict[DecoderRNN.KEY_SEQUENCE]: max_dec_len*(batch_size, 1)\n return prob_outputs, decoder_hidden_state, ret_dict\n\n def forward_step(self, input_var, hidden_state, encoder_outputs, goal_hid):\n # input_var: (batch_size, response_size-1 i.e. output_seq_len)\n # hidden_state: tuple: (h, c)\n # encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)\n # goal_hid: (batch_size, goal_nhid)\n batch_size, output_seq_len = input_var.size()\n # (batch_size, output_seq_len, embedding_dim)\n embedded = self.embedding(input_var)\n\n # add goals\n if goal_hid is not None:\n # (batch_size, 1, goal_nhid)\n goal_hid = goal_hid.view(goal_hid.size(0), 1, goal_hid.size(1))\n # (batch_size, output_seq_len, goal_nhid)\n goal_rep = goal_hid.repeat(1, output_seq_len, 1)\n # (batch_size, output_seq_len, embedding_dim+goal_nhid)\n embedded = th.cat([embedded, goal_rep], dim=2)\n\n embedded = self.input_dropout(embedded)\n\n # ############\n # embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)\n\n # output: (batch_size, output_seq_len, dec_cell_size)\n # hidden: tuple: (h, c)\n output, hidden_s = self.rnn(embedded, hidden_state)\n\n attn = None\n if self.use_attn:\n # output: (batch_size, output_seq_len, dec_cell_size)\n # encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)\n # attn: (batch_size, output_seq_len, max_ctx_len)\n output, attn = self.attention(output, encoder_outputs)\n\n # (batch_size*output_seq_len, vocab_size)\n logits = self.project(output.contiguous().view(-1, self.dec_cell_size))\n prediction = self.log_softmax(logits, dim=logits.dim(\n )-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)\n return prediction, hidden_s, attn\n\n # special for rl\n def _step(self, input_var, hidden_state, encoder_outputs, goal_hid):\n # input_var: (1, 1)\n # hidden_state: tuple: (h, c)\n # encoder_outputs: (1, max_dlg_len, dlg_cell_size)\n # goal_hid: (1, goal_nhid)\n batch_size, output_seq_len = input_var.size()\n embedded = self.embedding(input_var) # (1, 1, embedding_dim)\n\n if goal_hid is not None:\n goal_hid = goal_hid.view(goal_hid.size(\n 0), 1, goal_hid.size(1)) # (1, 1, goal_nhid)\n goal_rep = goal_hid.repeat(\n 1, output_seq_len, 1) # (1, 1, goal_nhid)\n # (1, 1, embedding_dim+goal_nhid)\n embedded = th.cat([embedded, goal_rep], dim=2)\n\n embedded = self.input_dropout(embedded)\n\n # ############\n # embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)\n\n # output: (1, 1, dec_cell_size)\n # hidden: tuple: (h, c)\n output, hidden_s = self.rnn(embedded, hidden_state)\n\n attn = None\n if self.use_attn:\n # output: (1, 1, dec_cell_size)\n # encoder_outputs: (1, max_dlg_len, dlg_cell_size)\n # attn: (1, 1, max_dlg_len)\n output, attn = self.attention(output, encoder_outputs)\n\n # (1*1, vocab_size)\n logits = self.project(output.view(-1, self.dec_cell_size))\n prediction = logits.view(\n batch_size, output_seq_len, -1) # (1, 1, vocab_size)\n # prediction = self.log_softmax(logits, dim=logits.dim()-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)\n return prediction, hidden_s\n\n # special for rl\n def write(self, input_var, hidden_state, encoder_outputs, max_words, vocab, stop_tokens, goal_hid=None, mask=True,\n decoding_masked_tokens=DECODING_MASKED_TOKENS):\n # input_var: (1, 1)\n # hidden_state: tuple: (h, c)\n # 
encoder_outputs: max_dlg_len*(1, 1, dlg_cell_size)\n # goal_hid: (1, goal_nhid)\n logprob_outputs = [] # list of logprob | max_dec_len*(1, )\n symbol_outputs = [] # list of word ids | max_dec_len*(1, )\n decoder_input = input_var\n decoder_hidden_state = hidden_state\n if type(encoder_outputs) is list:\n # (1, max_dlg_len, dlg_cell_size)\n encoder_outputs = th.cat(encoder_outputs, 1)\n # print('encoder_outputs.size() = {}'.format(encoder_outputs.size()))\n\n if mask:\n special_token_mask = Variable(th.FloatTensor(\n [-999. if token in decoding_masked_tokens else 0. for token in vocab]))\n special_token_mask = cast_type(\n special_token_mask, FLOAT, self.use_gpu) # (vocab_size, )\n\n def _sample(dec_output, num_i):\n # dec_output: (1, 1, vocab_size), need to softmax and log_softmax\n dec_output = dec_output.view(-1) # (vocab_size, )\n # TODO temperature\n prob = F.softmax(dec_output/0.6, dim=0) # (vocab_size, )\n logprob = F.log_softmax(dec_output, dim=0) # (vocab_size, )\n symbol = prob.multinomial(num_samples=1).detach() # (1, )\n # _, symbol = prob.topk(1) # (1, )\n _, tmp_symbol = prob.topk(1) # (1, )\n # print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))\n # print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))\n logprob = logprob.gather(0, symbol) # (1, )\n return logprob, symbol\n\n for i in range(max_words):\n decoder_output, decoder_hidden_state = self._step(\n decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)\n # disable special tokens from being generated in a normal turn\n if mask:\n decoder_output += special_token_mask.expand(1, 1, -1)\n logprob, symbol = _sample(decoder_output, i)\n logprob_outputs.append(logprob)\n symbol_outputs.append(symbol)\n decoder_input = symbol.view(1, -1)\n\n if vocab[symbol.item()] in stop_tokens:\n break\n\n assert len(logprob_outputs) == len(symbol_outputs)\n # logprob_list = [t.item() for t in logprob_outputs]\n logprob_list = logprob_outputs\n symbol_list = [t.item() for t in symbol_outputs]\n return logprob_list, symbol_list\n\n # For MultiWoz RL\n def forward_rl(self, batch_size, dec_init_state, attn_context, vocab, max_words, goal_hid=None, mask=True, temp=0.1):\n # prepare the BOS inputs\n with th.no_grad():\n bos_var = Variable(th.LongTensor([self.sys_id]))\n bos_var = cast_type(bos_var, LONG, self.use_gpu)\n decoder_input = bos_var.expand(batch_size, 1) # (1, 1)\n decoder_hidden_state = dec_init_state # tuple: (h, c)\n encoder_outputs = attn_context # (1, ctx_len, ctx_cell_size)\n\n logprob_outputs = [] # list of logprob | max_dec_len*(1, )\n symbol_outputs = [] # list of word ids | max_dec_len*(1, )\n\n if mask:\n special_token_mask = Variable(th.FloatTensor(\n [-999. if token in DECODING_MASKED_TOKENS else 0. 
for token in vocab]))\n special_token_mask = cast_type(\n special_token_mask, FLOAT, self.use_gpu) # (vocab_size, )\n\n def _sample(dec_output, num_i):\n # dec_output: (1, 1, vocab_size), need to softmax and log_softmax\n # (batch_size, vocab_size, )\n dec_output = dec_output.view(batch_size, -1)\n # (batch_size, vocab_size, )\n prob = F.softmax(dec_output/temp, dim=1)\n # (batch_size, vocab_size, )\n logprob = F.log_softmax(dec_output, dim=1)\n symbol = prob.multinomial(\n num_samples=1).detach() # (batch_size, 1)\n # _, symbol = prob.topk(1) # (1, )\n _, tmp_symbol = prob.topk(1) # (1, )\n # print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))\n # print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))\n logprob = logprob.gather(1, symbol) # (1, )\n return logprob, symbol\n\n stopped_samples = set()\n for i in range(max_words):\n decoder_output, decoder_hidden_state = self._step(\n decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)\n # disable special tokens from being generated in a normal turn\n if mask:\n decoder_output += special_token_mask.expand(1, 1, -1)\n logprob, symbol = _sample(decoder_output, i)\n logprob_outputs.append(logprob)\n symbol_outputs.append(symbol)\n decoder_input = symbol.view(batch_size, -1)\n for b_id in range(batch_size):\n if vocab[symbol[b_id].item()] == EOS:\n stopped_samples.add(b_id)\n\n if len(stopped_samples) == batch_size:\n break\n\n assert len(logprob_outputs) == len(symbol_outputs)\n symbol_outputs = th.cat(\n symbol_outputs, dim=1).cpu().data.numpy().tolist()\n logprob_outputs = th.cat(logprob_outputs, dim=1)\n logprob_list = []\n symbol_list = []\n for b_id in range(batch_size):\n b_logprob = []\n b_symbol = []\n for t_id in range(logprob_outputs.shape[1]):\n symbol = symbol_outputs[b_id][t_id]\n if vocab[symbol] == EOS and t_id != 0:\n break\n\n b_symbol.append(symbol_outputs[b_id][t_id])\n b_logprob.append(logprob_outputs[b_id][t_id])\n\n logprob_list.append(b_logprob)\n symbol_list.append(b_symbol)\n\n # TODO backward compatible, if batch_size == 1, we remove the nested structure\n if batch_size == 1:\n logprob_list = logprob_list[0]\n symbol_list = symbol_list[0]\n\n return logprob_list, symbol_list\n", "# Copyright 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch.autograd import Variable\n\nfrom convlab2.e2e.rnn_rollout.engines import EngineBase, Criterion\n\n\nclass SelectionEngine(EngineBase):\n def __init__(self, model, args, verbose=False):\n super(SelectionEngine, self).__init__(model, args, verbose)\n self.sel_crit = Criterion(\n self.model.item_dict,\n bad_toks=['<disconnect>', '<disagree>'],\n reduction='mean' if args.sep_sel else 'none')\n\n def _forward(model, batch, sep_sel=False):\n ctx, _, inpts, lens, _, sel_tgt, rev_idxs, hid_idxs, _ = batch\n ctx = Variable(ctx)\n inpts = [Variable(inpt) for inpt in inpts]\n rev_idxs = [Variable(idx) for idx in rev_idxs]\n hid_idxs = [Variable(idx) for idx in hid_idxs]\n if sep_sel:\n sel_tgt = Variable(sel_tgt)\n else:\n sel_tgt = [Variable(t) for t in sel_tgt]\n\n # remove YOU:/THEM: from the end\n sel_out = model(inpts[:-1], lens[:-1], rev_idxs[:-1], hid_idxs[:-1], ctx)\n\n return sel_out, sel_tgt\n\n def train_batch(self, batch):\n sel_out, sel_tgt = SelectionEngine._forward(self.model, batch,\n sep_sel=self.args.sep_sel)\n loss = 0\n if self.args.sep_sel:\n loss = 
self.sel_crit(sel_out, sel_tgt)\n else:\n for out, tgt in zip(sel_out, sel_tgt):\n loss += self.sel_crit(out, tgt)\n loss /= sel_out[0].size(0)\n\n self.opt.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)\n self.opt.step()\n return loss.item()\n\n def valid_batch(self, batch):\n with torch.no_grad():\n sel_out, sel_tgt = SelectionEngine._forward(self.model, batch,\n sep_sel=self.args.sep_sel)\n loss = 0\n if self.args.sep_sel:\n loss = self.sel_crit(sel_out, sel_tgt)\n else:\n for out, tgt in zip(sel_out, sel_tgt):\n loss += self.sel_crit(out, tgt)\n loss /= sel_out[0].size(0)\n\n return 0, loss.item(), 0\n\n\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.FloatTensor", "torch.nn.Linear", "torch.nn.functional.tanh", "torch.nn.functional.softmax", "torch.no_grad", "torch.nn.Embedding", "torch.LongTensor", "torch.bmm", "torch.cat" ], [ "torch.autograd.Variable", "torch.no_grad" ] ]
Jramirezg/ThreatMapper
[ "af5fda3ff585f8728a7a0b48ae6818ed189e4dbf" ]
[ "deepfence_backend/tasks/task_scheduler.py" ]
[ "import arrow\nfrom config.app import celery_app, app\nfrom models.container_image_registry import RegistryCredential\nfrom models.scheduler import Scheduler\nfrom models.setting import Setting\nfrom croniter import croniter\nfrom utils import constants\nimport time\nfrom datetime import datetime\nfrom utils.helper import websocketio_channel_name_format, get_image_cve_status\nfrom config.redisconfig import redis\nfrom utils.esconn import ESConn\nfrom resource_models.node import Node\nfrom utils.reports import prepare_report_download, prepare_report_email_body\nfrom utils.response import set_response\nfrom flask import make_response\nimport json\nimport uuid\nfrom copy import deepcopy\nfrom utils.helper import get_all_scanned_node, get_all_scanned_images\nimport pandas as pd\nimport re\n\n\n@celery_app.task\ndef task_scheduler():\n with app.app_context():\n curr_time = arrow.now(tz=\"+00:00\").datetime.replace(minute=0, second=0, microsecond=0)\n scheduled_tasks = Scheduler.query.filter_by(is_enabled=True).all()\n if not scheduled_tasks:\n return\n for scheduled_task in scheduled_tasks:\n if croniter.match(scheduled_task.cron_expr, curr_time):\n run_node_task(scheduled_task.action, scheduled_task.nodes, scheduled_task.id, scheduled_task.cron_expr)\n\n\ndef run_node_task(action, node_action_details, scheduler_id=None, cron_expr=None):\n with app.app_context():\n curr_time = arrow.now(tz=\"+00:00\").datetime\n if scheduler_id:\n try:\n scheduled_task = Scheduler.query.get(scheduler_id)\n scheduled_task.last_ran_at = curr_time\n scheduled_task.status = \"running\"\n scheduled_task.save()\n except Exception as ex:\n app.logger.error(ex)\n return\n\n def save_scheduled_task_status(status):\n if scheduler_id:\n try:\n scheduled_task = Scheduler.query.get(scheduler_id)\n scheduled_task.status = status\n scheduled_task.save()\n except Exception as ex:\n app.logger.error(ex)\n\n save_scheduled_task_status(\"In Progress\")\n node_type = node_action_details[\"node_type\"]\n df_id_to_scope_id_map = {}\n topology_data_df_format = {}\n registry_credential = None\n if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:\n try:\n registry_credential = RegistryCredential.query.get(\n node_action_details[\"registry_images\"][\"registry_id\"])\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n return\n else:\n if not node_action_details.get(\"node_id_list\"):\n node_action_details[\"node_id_list\"] = []\n for i in range(3):\n try:\n redis_pipe = redis.pipeline()\n redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node_type.upper())\n redis_pipe.get(websocketio_channel_name_format(node_type + \"?format=deepfence\")[1])\n redis_resp = redis_pipe.execute()\n df_id_to_scope_id_map = redis_resp[0]\n if redis_resp[1]:\n topology_data_df_format = json.loads(redis_resp[1])\n if topology_data_df_format and df_id_to_scope_id_map:\n break\n else:\n app.logger.error(\"topology data is empty, retrying\")\n time.sleep(10)\n except Exception as ex:\n app.logger.error(ex)\n time.sleep(10)\n if action in [constants.NODE_ACTION_CVE_SCAN_START, constants.NODE_ACTION_SCHEDULE_CVE_SCAN]:\n if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:\n from config.app import celery_app\n redis_lock_keys = []\n redis_pipe = redis.pipeline()\n image_list_details_str = redis.get(\"{0}:{1}\".format(constants.REGISTRY_IMAGES_CACHE_KEY_PREFIX,\n node_action_details[\"registry_images\"][\n \"registry_id\"]))\n if image_list_details_str:\n if 
node_action_details[\"registry_images\"].get(\"all_registry_images\", False):\n image_dict = json.loads(image_list_details_str)\n image_df = pd.DataFrame(image_dict['image_list'])\n image_df['timestamp'] = pd.to_datetime(image_df.pushed_at)\n sorted_df = image_df.sort_values(by=['timestamp'], ascending=False)\n df_unique_list = sorted_df[\"image_tag\"].unique()\n df_unique = pd.DataFrame(data=df_unique_list, columns=[\"image_tag\"])\n sorted_df_by_image_tag = image_df.sort_values(\"image_tag\")\n images_by_tags = df_unique.merge(sorted_df_by_image_tag, on=[\"image_tag\"], how=\"outer\")[\n \"image_name_with_tag\"]\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = images_by_tags\n elif node_action_details[\"registry_images\"].get(\"only_new_images\", False):\n image_dict = json.loads(image_list_details_str)\n all_registry_images = set([image[\"image_name_with_tag\"] for image in image_dict['image_list']])\n if cron_expr:\n pattern = '^0.*?\\*/(\\d).*?$'\n match = re.search(pattern, cron_expr)\n if match:\n days_interval = int(match.group(1))\n else:\n days_interval = 1\n images_need_to_be_scanned = all_registry_images - get_all_scanned_images(days_interval)\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = list(\n images_need_to_be_scanned)\n elif node_action_details[\"registry_images\"].get(\"registry_scan_type\", None) == \"latest_timestamp\":\n image_dict = json.loads(image_list_details_str)\n image_df = pd.DataFrame(image_dict['image_list'])\n image_df['timestamp'] = pd.to_datetime(image_df.pushed_at)\n grouped = image_df.groupby(['image_name']).agg({\"timestamp\": max}).reset_index()\n latest_images_by_tags = image_df.merge(grouped, on=[\"image_name\", \"timestamp\"], how=\"inner\")[\n 'image_name_with_tag']\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = latest_images_by_tags\n elif node_action_details[\"registry_images\"].get(\"registry_scan_type\", None) == \"image_tags\":\n if node_action_details[\"registry_images\"].get(\"image_tags\", []):\n image_tags = node_action_details[\"registry_images\"].get(\"image_tags\", [])\n image_dict = json.loads(image_list_details_str)\n image_df = pd.DataFrame(image_dict['image_list'])\n images_by_tags = image_df[image_df[\"image_tag\"].isin(image_tags)][\"image_name_with_tag\"]\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = images_by_tags\n else:\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = []\n for image_name_with_tag in node_action_details[\"registry_images\"][\"image_name_with_tag_list\"]:\n lock_key = \"{0}:{1}\".format(constants.NODE_ACTION_CVE_SCAN_START, image_name_with_tag)\n redis_pipe.incr(lock_key)\n redis_lock_keys.append(lock_key)\n redis_resp = redis_pipe.execute()\n time.sleep(1)\n image_cve_status = get_image_cve_status()\n for i, image_name_with_tag in enumerate(\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"]):\n try:\n if redis_resp[i] != 1:\n continue\n cve_status = image_cve_status.get(image_name_with_tag, {}).get(\"action\", \"\")\n if cve_status:\n if cve_status == constants.CVE_SCAN_STATUS_QUEUED or cve_status in constants.CVE_SCAN_RUNNING_STATUS:\n continue\n datetime_now = datetime.now()\n scan_id = image_name_with_tag + \"_\" + datetime_now.strftime(\"%Y-%m-%dT%H:%M:%S\") + \".000\"\n body = {\n \"masked\": \"false\", \"type\": constants.CVE_SCAN_LOGS_INDEX, \"scan_id\": scan_id, \"host\": \"\",\n \"@timestamp\": datetime_now.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"), 
\"cve_scan_message\": \"\",\n \"action\": constants.CVE_SCAN_STATUS_QUEUED, \"host_name\": \"\", \"node_id\": image_name_with_tag,\n \"time_stamp\": int(time.time() * 1000.0), \"node_type\": constants.NODE_TYPE_CONTAINER_IMAGE\n }\n ESConn.create_doc(constants.CVE_SCAN_LOGS_INDEX, body)\n scan_details = {\n \"cve_node_id\": image_name_with_tag, \"scan_types\": node_action_details[\"scan_type\"],\n \"registry_type\": registry_credential.registry_type, \"scan_id\": scan_id,\n \"credential_id\": registry_credential.id}\n celery_task_id = \"cve_scan:\" + scan_id\n if node_action_details[\"registry_images\"].get(\"priority\", False):\n celery_app.send_task('tasks.vulnerability_scan_worker.vulnerability_scan', args=(),\n task_id=celery_task_id, kwargs={\"scan_details\": scan_details},\n queue=constants.VULNERABILITY_SCAN_PRIORITY_QUEUE)\n else:\n celery_app.send_task('tasks.vulnerability_scan_worker.vulnerability_scan', args=(),\n task_id=celery_task_id, kwargs={\"scan_details\": scan_details},\n queue=constants.VULNERABILITY_SCAN_QUEUE)\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n time.sleep(2)\n redis_pipe = redis.pipeline()\n for lock_key in redis_lock_keys:\n redis.delete(lock_key)\n redis_pipe.execute()\n else:\n node_list = []\n redis_lock_keys = []\n redis_pipe = redis.pipeline()\n for node_id in node_action_details[\"node_id_list\"]:\n try:\n node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,\n topology_data_df_format=topology_data_df_format)\n if node.type == constants.NODE_TYPE_HOST:\n lock_key = \"{0}:{1}\".format(constants.NODE_ACTION_CVE_SCAN_START, node.host_name)\n else:\n if not node.image_name_tag:\n continue\n lock_key = \"{0}:{1}\".format(constants.NODE_ACTION_CVE_SCAN_START, node.image_name_tag)\n if lock_key in redis_lock_keys:\n # If same image, different container, already selected, don't scan again\n continue\n redis_lock_keys.append(lock_key)\n redis_pipe.incr(lock_key)\n node_list.append(node)\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n if not node_list:\n error_message = \"No node available for scan\"\n save_scheduled_task_status(\"Error: \" + error_message)\n app.logger.error(error_message)\n return\n redis_resp = redis_pipe.execute()\n for i, node in enumerate(node_list):\n if redis_resp[i] != 1:\n continue\n try:\n node.cve_scan_start(node_action_details[\"scan_type\"],\n priority=node_action_details.get(\"priority\", False))\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n time.sleep(1)\n redis_pipe = redis.pipeline()\n for lock_key in redis_lock_keys:\n redis.delete(lock_key)\n redis_pipe.execute()\n elif action == constants.NODE_ACTION_CVE_SCAN_STOP:\n if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:\n from config.app import celery_app\n if node_action_details[\"registry_images\"].get(\"all_registry_images\", False):\n image_list_details_str = redis.get(\"{0}:{1}\".format(constants.REGISTRY_IMAGES_CACHE_KEY_PREFIX,\n node_action_details[\"registry_images\"][\n \"registry_id\"]))\n image_dict = json.loads(image_list_details_str)\n node_action_details[\"registry_images\"][\"image_name_with_tag_list\"] = [image[\"image_name_with_tag\"]\n for image in\n image_dict['image_list']]\n for image_name_with_tag in node_action_details[\"registry_images\"][\"image_name_with_tag_list\"]:\n try:\n es_response = ESConn.search_by_and_clause(constants.CVE_SCAN_LOGS_INDEX,\n {\"node_id\": 
image_name_with_tag}, 0, size=1)\n latest_cve_scan_doc = {}\n cve_scan_list = es_response.get(\"hits\", [])\n if cve_scan_list:\n cve_scan_doc = cve_scan_list[0]\n latest_cve_scan_doc = cve_scan_doc.get('_source', {})\n latest_cve_scan_doc.update({'_id': cve_scan_doc.get('_id', \"\")})\n if latest_cve_scan_doc:\n status = latest_cve_scan_doc.get(\"action\", \"\")\n scan_id = latest_cve_scan_doc.get(\"scan_id\", \"\")\n if (status in constants.CVE_SCAN_NOT_RUNNING_STATUS) or (not scan_id):\n continue\n elif status != constants.CVE_SCAN_STATUS_QUEUED:\n continue\n celery_task_id = \"cve_scan:\" + scan_id\n celery_app.control.revoke(celery_task_id, terminate=False)\n body = {\n \"masked\": \"false\", \"type\": constants.CVE_SCAN_LOGS_INDEX, \"scan_id\": scan_id,\n \"cve_scan_message\": \"Scan stopped by user\", \"time_stamp\": int(time.time() * 1000.0),\n \"@timestamp\": datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"), \"host\": \"\",\n \"action\": constants.CVE_SCAN_STATUS_STOPPED, \"host_name\": \"\",\n \"node_id\": latest_cve_scan_doc.get(\"node_id\", \"\"),\n \"node_type\": constants.NODE_TYPE_CONTAINER_IMAGE\n }\n ESConn.create_doc(constants.CVE_SCAN_LOGS_INDEX, body)\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n else:\n for node_id in node_action_details[\"node_id_list\"]:\n try:\n node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,\n topology_data_df_format=topology_data_df_format)\n node.cve_scan_stop()\n except Exception as ex:\n save_scheduled_task_status(\"Error: \" + str(ex))\n app.logger.error(ex)\n elif action == constants.NODE_ACTION_SCHEDULE_SEND_REPORT:\n domain_name = \"\"\n console_url_setting = Setting.query.filter_by(key=\"console_url\").one_or_none()\n if console_url_setting and console_url_setting.value:\n domain_name = console_url_setting.value.get(\"value\")\n report_id = uuid.uuid4()\n body = {\n \"type\": constants.REPORT_INDEX,\n \"report_id\": report_id,\n \"status\": \"started\",\n \"masked\": \"false\",\n \"@timestamp\": datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n }\n ESConn.create_doc(constants.REPORT_INDEX, body, refresh=\"wait_for\")\n if node_action_details.get('include_dead_nodes') is True:\n if node_type == 'host':\n if len(node_action_details['filters'].get('host_name', [])) == 0:\n node_action_details['filters']['host_name'] = get_all_scanned_node()\n from config.app import celery_app\n celery_app.send_task(\n 'tasks.common_worker.generate_report', args=(),\n kwargs={\"report_id\": report_id, \"filters\": node_action_details.get(\"filters\", {}),\n \"lucene_query_string\": \"\",\n \"number\": node_action_details.get(\"duration\", {}).get(\"number\", 0),\n \"time_unit\": node_action_details.get(\"duration\", {}).get(\"time_unit\", \"day\"),\n \"domain_name\": domain_name, \"resources\": node_action_details.get(\"resources\", {}),\n \"file_type\": node_action_details.get(\"file_type\", \"xlsx\"), \"node_type\": node_type,\n \"include_dead_nodes\": node_action_details.get(\"include_dead_nodes\", False),\n \"report_email\": node_action_details[\"report_email\"]})\n return set_response(data=\"Started\")\n elif action == constants.NODE_ACTION_DOWNLOAD_REPORT:\n domain_name = \"\"\n console_url_setting = Setting.query.filter_by(key=\"console_url\").one_or_none()\n if console_url_setting and console_url_setting.value:\n domain_name = console_url_setting.value.get(\"value\")\n report_id = uuid.uuid4()\n body = {\n \"type\": constants.REPORT_INDEX,\n \"report_id\": report_id,\n 
\"status\": \"started\",\n \"masked\": \"false\",\n \"duration\": \"\",\n \"@timestamp\": datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n }\n ESConn.create_doc(constants.REPORT_INDEX, body, refresh=\"wait_for\")\n if node_action_details.get('include_dead_nodes') is True:\n if node_type == 'host':\n if len(node_action_details['filters'].get('host_name', [])) == 0:\n node_action_details['filters']['host_name'] = get_all_scanned_node()\n from config.app import celery_app\n celery_app.send_task(\n 'tasks.common_worker.generate_report', args=(),\n kwargs={\"report_id\": report_id, \"filters\": node_action_details.get(\"filters\", {}),\n \"lucene_query_string\": \"\",\n \"number\": node_action_details.get(\"duration\", {}).get(\"number\", 0),\n \"time_unit\": node_action_details.get(\"duration\", {}).get(\"time_unit\", \"d\"),\n \"domain_name\": domain_name, \"resources\": node_action_details.get(\"resources\", {}),\n \"file_type\": node_action_details.get(\"file_type\", \"xlsx\"), \"node_type\": node_type,\n \"include_dead_nodes\": node_action_details.get(\"include_dead_nodes\", False),\n \"report_email\": \"\"})\n return set_response(data=\"Started\")\n save_scheduled_task_status(\"Success\")\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
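The record above builds its "latest_timestamp" registry scan list with pandas: parse pushed_at, keep the newest push per image name, and join back to recover the full name:tag string. Below is a minimal standalone sketch of that pattern; the sample image list and column names are illustrative stand-ins for the registry cache that the scheduler actually reads from redis.

import pandas as pd

# Illustrative stand-in for the cached registry image list; the real code
# loads this JSON from redis before building the DataFrame.
image_list = [
    {"image_name": "nginx", "image_tag": "1.19",
     "pushed_at": "2021-01-01T10:00:00", "image_name_with_tag": "nginx:1.19"},
    {"image_name": "nginx", "image_tag": "1.21",
     "pushed_at": "2021-06-01T10:00:00", "image_name_with_tag": "nginx:1.21"},
    {"image_name": "redis", "image_tag": "6.2",
     "pushed_at": "2021-03-01T10:00:00", "image_name_with_tag": "redis:6.2"},
]

image_df = pd.DataFrame(image_list)
image_df["timestamp"] = pd.to_datetime(image_df.pushed_at)

# Newest push per image name, joined back to recover the full name:tag string.
grouped = image_df.groupby(["image_name"]).agg({"timestamp": "max"}).reset_index()
latest = image_df.merge(grouped, on=["image_name", "timestamp"], how="inner")["image_name_with_tag"]
print(list(latest))  # expected: ['nginx:1.21', 'redis:6.2']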
anonymous-cv/cvpr-sub
[ "6307520c73716de73ef63f5239bdac8dda20da41" ]
[ "test_pretrain.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport time\nimport argparse\nimport sys\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom tqdm import tqdm\n\nfrom network.BEV_Unet import BEV_Unet\nfrom network.ptBEV import ptBEVnet\nfrom dataloader.dataset import collate_fn_BEV,collate_fn_BEV_test,SemKITTI,SemKITTI_label_name,spherical_dataset,voxel_dataset\n#ignore weird np warning\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef fast_hist(pred, label, n):\n k = (label >= 0) & (label < n)\n bin_count=np.bincount(\n n * label[k].astype(int) + pred[k], minlength=n ** 2)\n return bin_count[:n ** 2].reshape(n, n)\n\ndef per_class_iu(hist):\n return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n\ndef fast_hist_crop(output, target, unique_label):\n hist = fast_hist(output.flatten(), target.flatten(), np.max(unique_label)+1)\n hist=hist[unique_label,:]\n hist=hist[:,unique_label]\n return hist\n\ndef SemKITTI2train(label):\n if isinstance(label, list):\n return [SemKITTI2train_single(a) for a in label]\n else:\n return SemKITTI2train_single(label)\n\ndef SemKITTI2train_single(label):\n remove_ind = label == 0\n label -= 1\n label[remove_ind] = 255\n return label\n\ndef train2SemKITTI(input_label):\n # delete 0 label\n new_labels=np.copy(input_label)\n new_labels[input_label==255]=0\n for label_num in range(0,19):\n new_labels[input_label==label_num]=label_num+1\n return new_labels\n\ndef main(args):\n data_path = args.data_dir\n test_batch_size = args.test_batch_size\n model_save_path = args.model_save_path\n output_path = args.test_output_path\n compression_model = args.grid_size[2]\n grid_size = args.grid_size\n pytorch_device = torch.device('cuda:0')\n model = args.model\n if model == 'polar':\n fea_dim = 9\n circular_padding = True\n elif model == 'traditional':\n fea_dim = 7\n circular_padding = False\n\n # prepare miou fun\n unique_label=np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1\n unique_label_str=[SemKITTI_label_name[x] for x in unique_label+1]\n\n # prepare model\n my_BEV_model=BEV_Unet(n_class=len(unique_label), n_height = compression_model, input_batch_norm = True, dropout = 0.5, circular_padding = circular_padding)\n my_model = ptBEVnet(my_BEV_model, pt_model = 'pointnet', grid_size = grid_size, fea_dim = fea_dim, max_pt_per_encode = 256,\n out_pt_fea_dim = 512, kernal_size = 1, pt_selection = 'random', fea_compre = compression_model)\n if os.path.exists(model_save_path):\n my_model.load_state_dict(torch.load(model_save_path))\n my_model.to(pytorch_device)\n\n # prepare dataset\n test_pt_dataset = SemKITTI(data_path + '/sequences/', imageset = 'test', return_ref = True)\n val_pt_dataset = SemKITTI(data_path + '/sequences/', imageset = 'val', return_ref = True)\n if model == 'polar':\n test_dataset=spherical_dataset(test_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True, return_test= True)\n val_dataset=spherical_dataset(val_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True)\n elif model == 'traditional':\n test_dataset=voxel_dataset(test_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True, return_test= True)\n val_dataset=voxel_dataset(val_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True)\n test_dataset_loader = torch.utils.data.DataLoader(dataset = test_dataset,\n batch_size = test_batch_size,\n collate_fn = collate_fn_BEV_test,\n shuffle = False,\n num_workers = 4)\n val_dataset_loader = 
torch.utils.data.DataLoader(dataset = val_dataset,\n batch_size = test_batch_size,\n collate_fn = collate_fn_BEV,\n shuffle = False,\n num_workers = 4)\n\n # validation\n print('*'*80)\n print('Test network performance on validation split')\n print('*'*80)\n pbar = tqdm(total=len(val_dataset_loader))\n my_model.eval()\n hist_list = []\n time_list = []\n with torch.no_grad():\n for i_iter_val,(_,val_vox_label,val_grid,val_pt_labs,val_pt_fea) in enumerate(val_dataset_loader):\n val_vox_label = SemKITTI2train(val_vox_label)\n val_pt_labs = SemKITTI2train(val_pt_labs)\n val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in val_pt_fea]\n val_grid_ten = [torch.from_numpy(i[:,:2]).to(pytorch_device) for i in val_grid]\n val_label_tensor=val_vox_label.type(torch.LongTensor).to(pytorch_device)\n\n torch.cuda.synchronize()\n start_time = time.time()\n predict_labels = my_model(val_pt_fea_ten, val_grid_ten)\n torch.cuda.synchronize()\n time_list.append(time.time()-start_time)\n\n predict_labels = torch.argmax(predict_labels,dim=1)\n predict_labels = predict_labels.cpu().detach().numpy()\n for count,i_val_grid in enumerate(val_grid):\n hist_list.append(fast_hist_crop(predict_labels[count,val_grid[count][:,0],val_grid[count][:,1],val_grid[count][:,2]],val_pt_labs[count],unique_label))\n pbar.update(1)\n iou = per_class_iu(sum(hist_list))\n print('Validation per class iou: ')\n for class_name, class_iou in zip(unique_label_str,iou):\n print('%s : %.2f%%' % (class_name, class_iou*100))\n val_miou = np.nanmean(iou) * 100\n del val_vox_label,val_grid,val_pt_fea,val_grid_ten\n pbar.close()\n print('Current val miou is %.3f ' % val_miou)\n print('Inference time per %d is %.4f seconds\\n' %\n (test_batch_size,np.mean(time_list)))\n \n # test\n print('*'*80)\n print('Generate predictions for test split')\n print('*'*80)\n pbar = tqdm(total=len(test_dataset_loader))\n for i_iter_test,(_,_,test_grid,_,test_pt_fea,test_index) in enumerate(test_dataset_loader):\n # predict\n test_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in test_pt_fea]\n test_grid_ten = [torch.from_numpy(i[:,:2]).to(pytorch_device) for i in test_grid]\n\n predict_labels = my_model(test_pt_fea_ten,test_grid_ten)\n predict_labels = torch.argmax(predict_labels,1)\n predict_labels = predict_labels.cpu().detach().numpy()\n # write to label file\n for count,i_test_grid in enumerate(test_grid):\n test_pred_label = predict_labels[count,test_grid[count][:,0],test_grid[count][:,1],test_grid[count][:,2]]\n test_pred_label = train2SemKITTI(test_pred_label)\n test_pred_label = np.expand_dims(test_pred_label,axis=1)\n save_dir = test_pt_dataset.im_idx[test_index[count]]\n _,dir2 = save_dir.split('/sequences/',1)\n new_save_dir = output_path + '/sequences/' +dir2.replace('velodyne','predictions')[:-3]+'label'\n if not os.path.exists(os.path.dirname(new_save_dir)):\n try:\n os.makedirs(os.path.dirname(new_save_dir))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n test_pred_label = test_pred_label.astype(np.uint32)\n test_pred_label.tofile(new_save_dir)\n pbar.update(1)\n del test_grid,test_pt_fea,test_index\n pbar.close()\n print('Predicted test labels are saved in %s. Need to be shifted to original label format before submitting to the Competition website.' 
% output_path)\n print('Remap script can be found in semantic-kitti-api.')\n\nif __name__ == '__main__':\n # Testing settings\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-d', '--data_dir', default='data')\n parser.add_argument('-p', '--model_save_path', default='pretained_weight/SemKITTI_PolarSeg.pt')\n parser.add_argument('-o', '--test_output_path', default='out/SemKITTI_test')\n parser.add_argument('-m', '--model', choices=['polar','traditional'], default='polar', help='training model: polar or traditional (default: polar)')\n parser.add_argument('-s', '--grid_size', nargs='+', type=int, default = [480,360,32], help='grid size of BEV representation (default: [480,360,32])')\n parser.add_argument('--test_batch_size', type=int, default=1, help='batch size for training (default: 1)')\n \n args = parser.parse_args()\n if not len(args.grid_size) == 3:\n raise Exception('Invalid grid size! Grid size should have 3 dimensions.')\n\n print(' '.join(sys.argv))\n print(args)\n main(args)" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "numpy.diag", "numpy.nanmean", "torch.argmax", "torch.no_grad", "torch.cuda.synchronize", "numpy.copy", "numpy.max", "numpy.expand_dims", "torch.from_numpy", "torch.device", "numpy.mean" ] ]
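The evaluation script above scores predictions with a confusion-matrix mIoU (fast_hist / per_class_iu). The following numpy-only sketch reproduces that computation on a toy 3-class prediction in place of real SemanticKITTI point labels.

import numpy as np

def fast_hist(pred, label, n):
    # n x n confusion matrix over the valid label range [0, n)
    k = (label >= 0) & (label < n)
    return np.bincount(n * label[k].astype(int) + pred[k],
                       minlength=n ** 2).reshape(n, n)

def per_class_iu(hist):
    # IoU = TP / (TP + FP + FN), computed per class from the confusion matrix
    return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))

# Toy example: 6 points, 3 classes.
pred = np.array([0, 1, 1, 2, 2, 2])
label = np.array([0, 1, 2, 2, 2, 1])
print(per_class_iu(fast_hist(pred, label, 3)))  # -> [1.0, 0.333..., 0.5]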
zblumen/stellargraph
[ "10e62006907dd5968286f33648d1054e9c961c1b" ]
[ "stellargraph/mapper/mini_batch_node_generators.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMappers to provide input data for the graph models in layers.\n\n\"\"\"\n__all__ = [\"ClusterNodeGenerator\", \"ClusterNodeSequence\"]\n\nimport random\nimport copy\nimport numpy as np\nimport networkx as nx\nfrom tensorflow.keras.utils import Sequence\n\nfrom scipy import sparse\nfrom ..core.graph import StellarGraph\nfrom ..core.utils import is_real_iterable\n\n\nclass ClusterNodeGenerator:\n \"\"\"\n A data generator for use with ClusterGCN models on homogeneous graphs, [1].\n\n The supplied graph G should be a StellarGraph object that is ready for\n machine learning. Currently the model requires node features to be available for all\n nodes in the graph.\n Use the :meth:`flow` method supplying the nodes and (optionally) targets\n to get an object that can be used as a Keras data generator.\n\n This generator will supply the features array and the adjacency matrix to a\n mini-batch Keras graph ML model.\n\n [1] `W. Chiang, X. Liu, S. Si, Y. Li, S. Bengio, C. Hsieh, 2019 <https://arxiv.org/abs/1905.07953>`_.\n\n For more information, please see the ClusterGCN demo:\n `<https://github.com/stellargraph/stellargraph/blob/master/demos/>`_\n\n Args:\n G (StellarGraph): a machine-learning StellarGraph-type graph\n clusters (int or list): If int then it indicates the number of clusters (default is 1 that is the given graph).\n If clusters is greater than 1, then nodes are uniformly at random assigned to a cluster. If list,\n then it should be a list of lists of node IDs such that each list corresponds to a cluster of nodes\n in G. The clusters should be non-overlapping.\n q (float): The number of clusters to combine for each mini-batch. 
The default is 1.\n lam (float): The mixture coefficient for adjacency matrix normalisation.\n name (str): an optional name of the generator\n \"\"\"\n\n def __init__(self, G, clusters=1, q=1, lam=0.1, name=None):\n\n if not isinstance(G, StellarGraph):\n raise TypeError(\"Graph must be a StellarGraph or StellarDiGraph object.\")\n\n self.graph = G\n self.name = name\n self.q = q # The number of clusters to sample per mini-batch\n self.lam = lam\n self.clusters = clusters\n\n if isinstance(clusters, list):\n self.k = len(clusters)\n elif isinstance(clusters, int):\n if clusters <= 0:\n raise ValueError(\n \"{}: clusters must be greater than 0.\".format(type(self).__name__)\n )\n self.k = clusters\n else:\n raise TypeError(\n \"{}: clusters must be either int or list type.\".format(\n type(self).__name__\n )\n )\n\n # Some error checking on the given parameter values\n if not isinstance(lam, float):\n raise TypeError(\"{}: lam must be a float type.\".format(type(self).__name__))\n\n if lam < 0 or lam > 1:\n raise ValueError(\n \"{}: lam must be in the range [0, 1].\".format(type(self).__name__)\n )\n\n if not isinstance(q, int):\n raise TypeError(\"{}: q must be integer type.\".format(type(self).__name__))\n\n if q <= 0:\n raise ValueError(\n \"{}: q must be greater than 0.\".format(type(self).__name__)\n )\n\n if self.k % q != 0:\n raise ValueError(\n \"{}: the number of clusters must be exactly divisible by q.\".format(\n type(self).__name__\n )\n )\n\n # Check if the graph has features\n G.check_graph_for_ml()\n\n self.node_list = list(G.nodes())\n\n # Check that there is only a single node type\n if len(G.node_types) > 1:\n raise ValueError(\n \"{}: node generator requires graph with single node type; \"\n \"a graph with multiple node types is passed. 
Stopping.\".format(\n type(self).__name__\n )\n )\n\n if isinstance(clusters, int):\n # We are not given graph clusters.\n # We are going to split the graph into self.k random clusters\n all_nodes = list(G.nodes())\n random.shuffle(all_nodes)\n cluster_size = len(all_nodes) // self.k\n self.clusters = [\n all_nodes[i : i + cluster_size]\n for i in range(0, len(all_nodes), cluster_size)\n ]\n if len(self.clusters) > self.k:\n # for the case that the number of nodes is not exactly divisible by k, we combine\n # the last cluster with the second last one\n self.clusters[-2].extend(self.clusters[-1])\n del self.clusters[-1]\n\n print(f\"Number of clusters {self.k}\")\n for i, c in enumerate(self.clusters):\n print(f\"{i} cluster has size {len(c)}\")\n\n # Get the features for the nodes\n self.features = G.node_features(self.node_list)\n\n def flow(self, node_ids, targets=None, name=None):\n \"\"\"\n Creates a generator/sequence object for training, evaluation, or prediction\n with the supplied node ids and numeric targets.\n\n Args:\n node_ids (iterable): an iterable of node ids for the nodes of interest\n (e.g., training, validation, or test set nodes)\n targets (2d array, optional): a 2D array of numeric node targets with shape `(len(node_ids),\n target_size)`\n name (str, optional): An optional name for the returned generator object.\n\n Returns:\n A ClusterNodeSequence object to use with ClusterGCN in Keras\n methods :meth:`fit_generator`, :meth:`evaluate_generator`, and :meth:`predict_generator`\n\n \"\"\"\n if targets is not None:\n # Check targets is an iterable\n if not is_real_iterable(targets):\n raise TypeError(\n \"{}: Targets must be an iterable or None\".format(\n type(self).__name__\n )\n )\n\n # Check targets correct shape\n if len(targets) != len(node_ids):\n raise ValueError(\n \"{}: Targets must be the same length as node_ids\".format(\n type(self).__name__\n )\n )\n\n return ClusterNodeSequence(\n self.graph,\n self.clusters,\n targets=targets,\n node_ids=node_ids,\n q=self.q,\n lam=self.lam,\n name=name,\n )\n\n\nclass ClusterNodeSequence(Sequence):\n \"\"\"\n A Keras-compatible data generator for node inference using ClusterGCN model.\n Use this class with the Keras methods :meth:`keras.Model.fit_generator`,\n :meth:`keras.Model.evaluate_generator`, and\n :meth:`keras.Model.predict_generator`,\n\n This class should be created using the `.flow(...)` method of\n :class:`ClusterNodeGenerator`.\n\n Args:\n graph (StellarGraph): The graph\n clusters (list): A list of lists such that each sub-list indicates the nodes in a cluster.\n The length of this list, len(clusters) indicates the number of batches in one epoch.\n targets (np.ndarray, optional): An optional array of node targets of size (N x C),\n where C is the target size (e.g., number of classes for one-hot class targets)\n node_ids (iterable, optional): The node IDs for the target nodes. Required if targets is not None.\n normalize_adj (bool, optional): Specifies whether the adjacency matrix for each mini-batch should\n be normalized or not. The default is True.\n q (int, optional): The number of subgraphs to combine for each batch. The default value is\n 1 such that the generator treats each subgraph as a batch.\n lam (float, optional): The mixture coefficient for adjacency matrix normalisation (the\n 'diagonal enhancement' method). 
Valid values are in the interval [0, 1] and the default value is 0.1.\n name (str, optional): An optional name for this generator object.\n \"\"\"\n\n def __init__(\n self,\n graph,\n clusters,\n targets=None,\n node_ids=None,\n normalize_adj=True,\n q=1,\n lam=0.1,\n name=None,\n ):\n\n self.name = name\n self.clusters = list()\n self.clusters_original = copy.deepcopy(clusters)\n self.graph = graph\n self.node_list = list(graph.nodes())\n self.normalize_adj = normalize_adj\n self.q = q\n self.lam = lam\n self.node_order = list()\n self._node_order_in_progress = list()\n self.__node_buffer = dict()\n self.target_ids = list()\n\n if len(clusters) % self.q != 0:\n raise ValueError(\n \"The number of clusters should be exactly divisible by q. However, {} number of clusters is not exactly divisible by {}.\".format(\n len(clusters), q\n )\n )\n\n if node_ids is not None:\n self.target_ids = list(node_ids)\n\n if targets is not None:\n if node_ids is None:\n raise ValueError(\n \"Since targets is not None, node_ids must be given and cannot be None.\"\n )\n\n if len(node_ids) != len(targets):\n raise ValueError(\n \"When passed together targets and indices should be the same length.\"\n )\n\n self.targets = np.asanyarray(targets)\n self.target_node_lookup = dict(\n zip(self.target_ids, range(len(self.target_ids)))\n )\n else:\n self.targets = None\n\n self.on_epoch_end()\n\n def __len__(self):\n num_batches = len(self.clusters_original) // self.q\n return num_batches\n\n def __getitem__(self, index):\n # The next batch should be the adjacency matrix for the cluster and the corresponding feature vectors\n # and targets if available.\n cluster = self.clusters[index]\n adj_cluster = self.graph.to_adjacency_matrix(cluster)\n\n # The operations to normalize the adjacency matrix are too slow.\n # Either optimize this or implement as a layer(?)\n if self.normalize_adj:\n # add self loops\n adj_cluster.setdiag(1) # add self loops\n degree_matrix_diag = 1.0 / (adj_cluster.sum(axis=1) + 1)\n degree_matrix_diag = np.squeeze(np.asarray(degree_matrix_diag))\n degree_matrix = sparse.lil_matrix(adj_cluster.shape)\n degree_matrix.setdiag(degree_matrix_diag)\n adj_cluster = degree_matrix.tocsr() @ adj_cluster\n adj_cluster.setdiag((1.0 + self.lam) * adj_cluster.diagonal())\n\n adj_cluster = adj_cluster.toarray()\n\n g_node_list = list(cluster)\n\n # Determine the target nodes that exist in this cluster\n target_nodes_in_cluster = np.asanyarray(\n list(set(g_node_list).intersection(self.target_ids))\n )\n\n self.__node_buffer[index] = target_nodes_in_cluster\n\n # Dictionary to store node indices for quicker node index lookups\n node_lookup = dict(zip(g_node_list, range(len(g_node_list))))\n\n # The list of indices of the target nodes in self.node_list\n target_node_indices = np.array(\n [node_lookup[n] for n in target_nodes_in_cluster]\n )\n\n if index == (len(self.clusters_original) // self.q) - 1:\n # last batch\n self.__node_buffer_dict_to_list()\n\n cluster_targets = None\n #\n if self.targets is not None:\n # Dictionary to store node indices for quicker node index lookups\n # The list of indices of the target nodes in self.node_list\n cluster_target_indices = np.array(\n [self.target_node_lookup[n] for n in target_nodes_in_cluster]\n )\n cluster_targets = self.targets[cluster_target_indices]\n cluster_targets = cluster_targets.reshape((1,) + cluster_targets.shape)\n\n features = self.graph.node_features(g_node_list)\n\n features = np.reshape(features, (1,) + features.shape)\n adj_cluster = 
adj_cluster.reshape((1,) + adj_cluster.shape)\n target_node_indices = target_node_indices[np.newaxis, np.newaxis, :]\n\n return [features, target_node_indices, adj_cluster], cluster_targets\n\n def __node_buffer_dict_to_list(self):\n self.node_order = []\n for k, v in self.__node_buffer.items():\n self.node_order.extend(v)\n\n def on_epoch_end(self):\n \"\"\"\n Shuffle all nodes at the end of each epoch\n \"\"\"\n if self.q > 1:\n # combine clusters\n cluster_indices = list(range(len(self.clusters_original)))\n random.shuffle(cluster_indices)\n self.clusters = []\n\n for i in range(0, len(cluster_indices) - 1, self.q):\n cc = cluster_indices[i : i + self.q]\n tmp = []\n for l in cc:\n tmp.extend(list(self.clusters_original[l]))\n self.clusters.append(tmp)\n else:\n self.clusters = copy.deepcopy(self.clusters_original)\n\n self.__node_buffer = dict()\n\n random.shuffle(self.clusters)\n" ]
[ [ "numpy.reshape", "numpy.asarray", "numpy.asanyarray", "scipy.sparse.lil_matrix", "numpy.array" ] ]
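ClusterNodeSequence.__getitem__ above normalises each cluster's adjacency matrix as D^-1 (A + I) and then scales the diagonal by (1 + lam), the 'diagonal enhancement' step. Here is a minimal scipy.sparse sketch of those steps on a toy 3-node path graph; the matrix and the lam value are illustrative, not taken from a real StellarGraph cluster.

import numpy as np
from scipy import sparse

lam = 0.1  # same default as the generator above

# Toy 3-node path graph in place of a real cluster adjacency matrix.
adj = sparse.lil_matrix(np.array([[0., 1., 0.],
                                  [1., 0., 1.],
                                  [0., 1., 0.]]))
adj.setdiag(1)                                     # add self loops
degree_inv = 1.0 / (adj.sum(axis=1) + 1)           # inverse of (degree + 1)
degree_inv = np.squeeze(np.asarray(degree_inv))
d_inv = sparse.lil_matrix(adj.shape)
d_inv.setdiag(degree_inv)
adj = d_inv.tocsr() @ adj.tocsr()                  # D^-1 (A + I)
adj.setdiag((1.0 + lam) * adj.diagonal())          # diagonal enhancement
print(adj.toarray())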
robbjr/datasets
[ "fbb2af9d0e88f8e2ae884e9764fbeff2ee487813" ]
[ "tensorflow_datasets/testing/mocking.py" ]
[ "# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Mock util for tfds.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport random\n\nfrom absl.testing import absltest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_datasets.core import features as features_lib\n\n\[email protected]\ndef mock_data(num_examples=1, as_dataset_fn=None, data_dir=None):\n \"\"\"Mock tfds to generate random data.\n\n This function requires the true metadata files (dataset_info.json, label.txt,\n vocabulary files) to be stored in `data_dir/dataset_name/version`, as they\n would be for the true dataset.\n The actual examples will be randomly generated using\n `builder.info.features.get_tensor_info()`.\n Download and prepare step will be skipped.\n\n Warning: As the mocked builder will use the true metadata (label names,...),\n the `info.split['train'].num_examples` won't match `len(list(ds_train))`.\n\n Usage (automated):\n\n ```\n with mock_data(num_examples=5):\n ds = tfds.load('some_dataset', split='train')\n\n for ex in ds: # ds will yield randomly generated examples.\n ex\n ```\n\n If you want more fine grain control over the generated examples, you can\n manually overwrite the `DatasetBuilder._as_dataset` method.\n Usage (manual):\n\n ```\n def as_dataset(self, *args, **kwargs):\n return tf.data.Dataset.from_generator(\n lambda: ({\n 'image': np.ones(shape=(28, 28, 1), dtype=np.uint8),\n 'label': i % 10,\n } for i in range(num_examples)),\n output_types=self.info.features.dtype,\n output_shapes=self.info.features.shape,\n )\n\n with mock_data(as_dataset_fn=as_dataset):\n ds = tfds.load('some_dataset', split='train')\n\n for ex in ds: # ds will yield the fake data example of 'as_dataset'.\n ex\n ```\n\n Args:\n num_examples: `int`, the number of fake example to generate.\n as_dataset_fn: if provided, will replace the default random example\n generator. This function mock the `FileAdapterBuilder._as_dataset`\n data_dir: `str`, `data_dir` folder from where to load the metadata.\n Will overwrite `data_dir` kwargs from `tfds.load`.\n\n Yields:\n None\n \"\"\"\n\n def mock_download_and_prepare(self, *args, **kwargs):\n del args\n del kwargs\n if not tf.io.gfile.exists(self._data_dir): # pylint: disable=protected-access\n raise ValueError(\n 'TFDS has been mocked, but metadata files where not found in {}. 
'\n 'You should copy the real metadata files, so that the dataset '\n 'can be loaded properly, or set the data_dir kwarg of'\n 'tfds.testing.mock_tfds(data_dir=...).'\n ''.format(self._data_dir) # pylint: disable=protected-access\n )\n\n def mock_as_dataset(self, *args, **kwargs):\n del args\n del kwargs\n ds = tf.data.Dataset.from_generator(\n lambda: (_generate_random_example(self) for _ in range(num_examples)),\n output_types=self.info.features.dtype,\n output_shapes=self.info.features.shape,\n )\n return ds\n\n if not as_dataset_fn:\n as_dataset_fn = mock_as_dataset\n\n if not data_dir:\n data_dir = os.path.join(os.path.dirname(__file__), 'metadata')\n\n download_and_prepare_path = 'tensorflow_datasets.core.dataset_builder.DatasetBuilder.download_and_prepare'\n as_dataset_path = 'tensorflow_datasets.core.dataset_builder.FileAdapterBuilder._as_dataset'\n data_dir_path = 'tensorflow_datasets.core.constants.DATA_DIR'\n\n with absltest.mock.patch(as_dataset_path, as_dataset_fn), \\\n absltest.mock.patch(\n download_and_prepare_path, mock_download_and_prepare), \\\n absltest.mock.patch(data_dir_path, data_dir):\n yield\n\n\ndef _generate_random_array(feature, tensor_info):\n \"\"\"Generates a random tensor for a single feature.\"\"\"\n # TODO(tfds): Could improve the fake generatiion:\n # * Use the feature statistics (min, max)\n # * For Sequence features\n # * For Text\n shape = [ # Fill dynamic shape with random values\n np.random.randint(5, 50) if s is None else s\n for s in tensor_info.shape\n ]\n if isinstance(feature, features_lib.ClassLabel):\n max_value = feature.num_classes\n elif isinstance(feature, features_lib.Text) and feature.vocab_size:\n max_value = feature.vocab_size\n else:\n max_value = 255\n\n # Generate some random values, depending on the dtype\n if tensor_info.dtype.is_integer:\n return np.random.randint(0, max_value, shape)\n elif tensor_info.dtype.is_floating:\n return np.random.random_sample(shape)\n elif tensor_info.dtype == tf.string:\n return ''.join(\n random.choice(' abcdefghij') for _ in range(random.randint(10, 20)))\n else:\n raise ValueError('Fake generation not supported for {}'.format(\n tensor_info.dtype))\n\n\ndef _generate_random_example(builder):\n root_feature = builder.info.features\n flat_features = root_feature._flatten(root_feature) # pylint: disable=protected-access\n flat_tensor_info = root_feature._flatten(root_feature.get_tensor_info()) # pylint: disable=protected-access\n flat_np = [\n _generate_random_array(feature, tensor_info)\n for feature, tensor_info in zip(flat_features, flat_tensor_info)\n ]\n return root_feature._nest(flat_np) # pylint: disable=protected-access\n" ]
[ [ "tensorflow.io.gfile.exists", "numpy.random.random_sample", "numpy.random.randint" ] ]
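The mocking helper above generates fake examples by filling dynamic dimensions with random sizes and drawing values according to the dtype (_generate_random_array). The numpy-only sketch below shows that strategy in isolation; the shapes and dtypes are hypothetical stand-ins for what builder.info.features.get_tensor_info() would report.

import numpy as np

def random_array(shape, dtype, max_value=255):
    # Fill dynamic (None) dimensions with a random size, then draw values by dtype.
    shape = [np.random.randint(5, 50) if s is None else s for s in shape]
    if np.issubdtype(dtype, np.integer):
        return np.random.randint(0, max_value, shape).astype(dtype)
    if np.issubdtype(dtype, np.floating):
        return np.random.random_sample(shape).astype(dtype)
    raise ValueError("fake generation not supported for {}".format(dtype))

# Hypothetical feature specs: an image with dynamic height/width and a class label.
fake_image = random_array((None, None, 3), np.uint8)
fake_label = random_array((), np.int64, max_value=10)   # label in [0, 10)
print(fake_image.shape, int(fake_label))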